blob: da6a2b22adf6e87cc0c973444907fd6940c1eedd [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
33#include "ReshapeTestImpl.hpp"
34#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Laid out channel-major (CHW): 3 channels of 8 rows x 16 columns each.
static std::vector<float> ConvInput3x8x16({
    // Channel 0: rows of 0.5 with one all-zero row (row 1).
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: a single vertical line of ones at column 2, zeros elsewhere.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: all -1.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests.
// One value per output channel; quantized on demand by GetBias2 below.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +000082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
83{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +000087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
88 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Runs a 2D convolution of the shared 3-channel 16x8 image against a pair of
// 3-channel 3x5 kernels and compares against precomputed expected output.
//
// ArmnnType  - tensor data type for input/kernel/output.
// ArmnnBType - tensor data type for the optional bias.
// qScale, qOffset - quantization parameters applied to all generated data
//                   (use 0.f / 0 for non-quantized types).
// biasEnabled - when true, the shared Bias2 values are used (see GetBias2).
// layout      - data layout forwarded to SimpleConvolution2dTestImpl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, input channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, input channel 1 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, input channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, input channel 0 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, input channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, input channel 2 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Runs a 2D convolution of the shared 3-channel 16x8 image against a pair of
// 3-channel 3x3 kernels and compares against precomputed expected output.
// See SimpleConvolution2d3x5TestCommon for parameter semantics.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, input channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Kernel 0, input channel 1 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, input channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, input channel 0 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, input channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, input channel 2 (all zeros).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
// Runs a strided (2x2) 3x3 convolution over a single-batch, single-channel
// 5x5 image with padding of 1 on every edge, using NHWC-shaped tensor infos.
//
// NOTE(review): qScale/qOffset are forwarded but not applied to the literal
// data below, and biasEnabled is accepted but ignored (an empty bias is
// always passed) - kept for signature uniformity with the other test commons.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Padding of 1 on all sides with stride 2 in both dimensions: 5x5 -> 3x3.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),  // No bias (biasEnabled is intentionally unused).
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
// Convolves a 3x3 image with a 2x2 kernel using asymmetric padding that is
// larger than half the kernel size on the right/bottom (L=1, T=2, R=3, B=4),
// producing a mostly-zero 8x6 output. Bias is always disabled here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 8x6 (rows x cols) image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
507
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000508template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
509 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000510LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
511 armnn::IWorkloadFactory& workloadFactory,
512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000513 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000514 float qScale,
515 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000516{
telsoa01c577f2c2018-08-31 09:22:23 +0100517 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000518 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000519 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
520 QuantizedVector<T>(qScale, qOffset, {
521 11,21,31,41,51,
522 12,22,32,42,52,
523 13,23,33,43,53,
524 14,24,34,44,54,
525 15,25,35,45,55,
526 })));
527
telsoa01c577f2c2018-08-31 09:22:23 +0100528 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000529 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000530 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
531 QuantizedVector<T>(qScale, qOffset, {
532 -11,-21,-31,-41,
533 -12,-22,-32,-42,
534 -13,-23,-33,-43,
535 -14,-24,-34,-44,
536 })));
537
telsoa01c577f2c2018-08-31 09:22:23 +0100538 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000539 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000540 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
541 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
542 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000543 -7140, -10580, -13940, -9300, -5230,
544 -9590, -14120, -18520, -12290, -6860,
545 -9980, -14560, -18960, -12560, -7000,
546 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100547 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000548 })));
549
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000550 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
551 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000552 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000553 input,
554 kernel,
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000555 GetBias2<ArmnnBType>(false, qScale, qOffset),
telsoa014fcda012018-03-09 14:13:49 +0000556 expectedOutput,
557 qScale,
558 qOffset,
narpra015f703182018-10-26 16:24:58 +0100559 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100560 1, // Padding left.
561 1, // Padding top.
562 2, // Padding right.
563 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100564}
565
// Runs a depthwise convolution (depth multiplier 1) of a 2-channel 5x5 image
// against a 2-channel 4x4 kernel with asymmetric padding (L=1, T=1, R=2, B=2)
// and stride 1x1, comparing against precomputed expected output.
//
// NOTE(review): the input/kernel/output data are quantized with each
// TensorInfo's own quantization scale/offset (the defaults), not with the
// qScale/qOffset parameters - those are only forwarded to the impl and to
// GetBias2. Confirm this is intentional before changing it.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            // Channel 1.
            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            // Channel 1.
            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
642
// NHWC variant of the depthwise convolution test above: same 2-channel 5x5
// input, 2-channel 4x4 kernel, padding (L=1, T=1, R=2, B=2) and stride 1x1,
// with the input and expected output stored channel-interleaved ({1, 5, 5, 2}).
//
// NOTE(review): as in the NCHW variant, the data are quantized with each
// TensorInfo's own (default) scale/offset rather than the qScale/qOffset
// parameters, which are only forwarded on.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // NHWC input: each pair below is (channel0, channel1) for one pixel.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Kernel stays in the same {1, 2, 4, 4} shape as the NCHW variant.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            // Channel 1.
            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output, channel-interleaved (same values as the NCHW variant).
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
750
// Shared implementation for the dilated (3x3) depthwise-convolution NHWC tests.
// A 9x9 single-channel input containing a centred 3x3 block of ones is
// convolved with a 3x3 kernel at dilation 3, which samples one value from each
// cell of the 3x3 block and therefore yields a 3x3 output of constant 5s
// (5 is the centre kernel element).
//
// ArmnnType/ArmnnBType - tensor and bias data types.
// qScale/qOffset       - quantization parameters forwarded to the implementation.
// biasEnabled          - when true, the bias produced by GetBias2 is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: batch 1, 9x9 spatial, 1 channel (NHWC); zeros except a central 3x3 of ones.
    armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // 3x3 kernel with values 1..9 (centre element is 5).
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    // No padding, unit stride, dilation 3 in both dimensions.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
        expectedOutput,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
819
telsoa014fcda012018-03-09 14:13:49 +0000820LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000821Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
822 armnn::IWorkloadFactory& workloadFactory,
823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000824 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000825{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000826 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
827 <armnn::DataType::Float32, armnn::DataType::Float32>(
828 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000829}
830
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000831LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
832 armnn::IWorkloadFactory& workloadFactory,
833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000834 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000835{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000836 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000837 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000838}
839
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000840LayerTestResult<float, 4> DepthwiseConvolution2dTest(
841 armnn::IWorkloadFactory& workloadFactory,
842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
843 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000844 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000845{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000846 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000847 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000848}
849
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
851 armnn::IWorkloadFactory& workloadFactory,
852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
853 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100854{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000855 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
856 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100857}
858
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000859LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
860 armnn::IWorkloadFactory& workloadFactory,
861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
862 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000863 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000864{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000865 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000866 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000867}
868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000869LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
870 armnn::IWorkloadFactory& workloadFactory,
871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
872 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000873 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100874{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000875 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000876 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100877}
878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000879LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
880 armnn::IWorkloadFactory& workloadFactory,
881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
882 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000883 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000884{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000885 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000886 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000887}
888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000889LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
890 armnn::IWorkloadFactory& workloadFactory,
891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
892 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000893 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000894{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000895 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000896 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000897}
898
Bruno Goncalves22972f02019-04-26 21:03:24 -0300899LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
900 armnn::IWorkloadFactory& workloadFactory,
901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
902{
903 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
904 workloadFactory,
905 memoryManager,
906 0.f,
907 0,
908 false);
909}
910
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000911LayerTestResult<float, 4> Convolution1dTest(
912 armnn::IWorkloadFactory& workloadFactory,
913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
914 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000915{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000916 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
917 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000918}
919
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000920LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
921 armnn::IWorkloadFactory& workloadFactory,
922 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
923 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000925 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
926 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000927}
928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000929LayerTestResult<float,4> CompareConvolution2dTest(
930 armnn::IWorkloadFactory& workloadFactory,
931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
932 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000933{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000934 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
935 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000936}
937
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000938LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000939 armnn::IWorkloadFactory& workloadFactory,
940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
941 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000942 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000943{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000944 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
945 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000946}
947
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000948LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
949 armnn::IWorkloadFactory& workloadFactory,
950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
951 armnn::IWorkloadFactory& refWorkloadFactory,
952 const armnn::DataLayout layout)
953{
954 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
955 workloadFactory, memoryManager, refWorkloadFactory, layout);
956}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000957
958LayerTestResult<float,4> SimpleNormalizationAcrossTest(
959 armnn::IWorkloadFactory& workloadFactory,
960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000961{
962 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
963 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000964 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000965}
966
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000967LayerTestResult<float,4> SimpleNormalizationWithinTest(
968 armnn::IWorkloadFactory& workloadFactory,
969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000970{
971 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
972 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000973 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000974}
975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000976LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
977 armnn::IWorkloadFactory& workloadFactory,
978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100979{
980 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
981 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000982 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100983}
984
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000985LayerTestResult<float,2> SimpleSoftmaxTest(
986 armnn::IWorkloadFactory& workloadFactory,
987 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
988 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000989{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000990 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000991}
992
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000993LayerTestResult<float,3> Simple3dSoftmaxTest(
994 armnn::IWorkloadFactory& workloadFactory,
995 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
996 float beta)
997{
998 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
999}
1000
1001LayerTestResult<float,4> Simple4dSoftmaxTest(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004 float beta)
1005{
1006 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1007}
1008
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001009LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1010 armnn::IWorkloadFactory& workloadFactory,
1011 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1012 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001013{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001014 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001015}
1016
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001017LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1018 armnn::IWorkloadFactory& workloadFactory,
1019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1020 float beta)
1021{
1022 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1023}
1024
1025LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1026 armnn::IWorkloadFactory& workloadFactory,
1027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1028 float beta)
1029{
1030 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1031}
1032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001033LayerTestResult<float,4> CompareNormalizationTest(
1034 armnn::IWorkloadFactory& workloadFactory,
1035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1036 armnn::IWorkloadFactory& refWorkloadFactory,
1037 armnn::NormalizationAlgorithmChannel normChannel,
1038 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001039{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001040 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001041}
1042
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001043LayerTestResult<float,2> CompareSoftmaxTest(
1044 armnn::IWorkloadFactory& workloadFactory,
1045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001046 armnn::IWorkloadFactory& refWorkloadFactory,
1047 float beta)
1048{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001049 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1050 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001051}
1052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001053LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1054 armnn::IWorkloadFactory& workloadFactory,
1055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001056 armnn::IWorkloadFactory& refWorkloadFactory,
1057 float beta)
1058{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001059 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1060 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001061}
1062
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001063std::vector<LayerTestResult<float,3>> SplitterTest(
1064 armnn::IWorkloadFactory& workloadFactory,
1065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001066{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001067 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001068}
1069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001070std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1071 armnn::IWorkloadFactory& workloadFactory,
1072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001074 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001075}
1076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001077LayerTestResult<float, 3> CopyViaSplitterTest(
1078 armnn::IWorkloadFactory& workloadFactory,
1079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001081 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001082}
1083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001084LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1085 armnn::IWorkloadFactory& workloadFactory,
1086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001087{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001089}
1090
// LSTM layer test, Float32, CIFG (coupled input-forget gate) and peephole
// connections enabled, no projection layer. Feeds a fixed 2x2 input batch and
// checks against a precomputed 2x4 output.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: 2 batches of 2 values each.
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Precomputed output: 2 batches of 4 values each.
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1106
// LSTM layer test, Float32, no CIFG, peephole connections and a projection
// layer enabled. Feeds a fixed 2x5 input batch and checks against a
// precomputed 2x16 output.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: 2 batches of 5 values each.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Precomputed output: 2 batches of 16 values each (projection output size).
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1128
// LSTM layer test, Float32, basic configuration: no CIFG, no peephole
// connections, no projection layer. Feeds a fixed 2x2 input batch and checks
// against a precomputed 2x4 output.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: 2 batches of 2 values each.
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    // Precomputed output: 2 batches of 4 values each.
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1146
// Quantized (QSymm16) LSTM layer test, basic configuration: no CIFG, no
// peephole, no projection. Activations use QSymm16 while the constant tensors
// are passed through as QAsymm8; same input/expected data as the Float32
// variant, quantized with scale 1 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Shared quantization parameters for input and expected output.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Input: 2 batches of 2 values each, quantized.
    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>{2., 3., 3., 4.}));

    // Precomputed output: 2 batches of 4 values each, quantized.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
1170
// Quantized (QSymm16) LSTM layer test with CIFG and peephole connections, no
// projection. Constant tensors are passed through as QAsymm8; same
// input/expected data as the Float32 variant, quantized with scale 1 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Shared quantization parameters for input and expected output.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Input: 2 batches of 2 values each, quantized.
    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
            std::vector<float>({ 2., 3., 3., 4. })));

    // Precomputed output: 2 batches of 4 values each, quantized.
    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1194
// Quantized (QSymm16) LSTM layer test with peephole connections and a
// projection layer, no CIFG. Constant tensors are passed through as QAsymm8;
// same input/expected data as the Float32 variant, quantized with
// scale 2 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Shared quantization parameters for input and expected output.
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Input: 2 batches of 5 values each, quantized.
    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Precomputed output: 2 batches of 16 values each (projection output size), quantized.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1224
// Quantized LSTM layer test, basic configuration (no CIFG, no peephole, no
// projection), where BOTH the activations and the constant tensors use
// QSymm16 (unlike the sibling test that passes constants as QAsymm8).
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Shared quantization parameters for input and expected output.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    // Input: 2 batches of 2 values each, quantized.
    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>{2., 3., 3., 4.}));

    // Precomputed output: 2 batches of 4 values each, quantized.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
1246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001247LayerTestResult<float,3> MergerTest(
1248 armnn::IWorkloadFactory& workloadFactory,
1249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001250{
surmeh013537c2c2018-05-18 16:31:43 +01001251 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001252 unsigned int outputHeight = 6;
1253 unsigned int outputChannels = 3;
1254
surmeh013537c2c2018-05-18 16:31:43 +01001255 unsigned int inputWidth1 = 3;
1256 unsigned int inputHeight1 = 6;
1257 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001258
surmeh013537c2c2018-05-18 16:31:43 +01001259 unsigned int inputWidth2 = 3;
1260 unsigned int inputHeight2 = 6;
1261 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001262
telsoa01c577f2c2018-08-31 09:22:23 +01001263 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001264 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1265 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1266 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001267
1268 LayerTestResult<float,3> ret(outputTensorInfo);
1269
telsoa014fcda012018-03-09 14:13:49 +00001270 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001271 {
1272 1.0f, 2.0f, 3.0f,
1273 4.0f, 5.0f, 6.0f,
1274 7.0f, 8.0f, 9.0f,
1275 10.0f, 11.0f, 12.0f,
1276 13.0f, 14.0f, 15.0f,
1277 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001278
surmeh013537c2c2018-05-18 16:31:43 +01001279 19.0f, 20.0f, 21.0f,
1280 22.0f, 23.0f, 24.0f,
1281 25.0f, 26.0f, 27.0f,
1282 28.0f, 29.0f, 30.0f,
1283 31.0f, 32.0f, 33.0f,
1284 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001285
surmeh013537c2c2018-05-18 16:31:43 +01001286 37.0f, 38.0f, 39.0f,
1287 40.0f, 41.0f, 42.0f,
1288 43.0f, 44.0f, 45.0f,
1289 46.0f, 47.0f, 48.0f,
1290 49.0f, 50.0f, 51.0f,
1291 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001292 })
1293 );
1294
telsoa014fcda012018-03-09 14:13:49 +00001295 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1296 {
surmeh013537c2c2018-05-18 16:31:43 +01001297 1.0f, 2.0f, 3.0f,
1298 4.0f, 5.0f, 6.0f,
1299 7.0f, 8.0f, 9.0f,
1300 10.0f, 11.0f, 12.0f,
1301 13.0f, 14.0f, 15.0f,
1302 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001303
surmeh013537c2c2018-05-18 16:31:43 +01001304 19.0f, 20.0f, 21.0f,
1305 22.0f, 23.0f, 24.0f,
1306 25.0f, 26.0f, 27.0f,
1307 28.0f, 29.0f, 30.0f,
1308 31.0f, 32.0f, 33.0f,
1309 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001310 })
1311 );
1312
1313 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1314 {
surmeh013537c2c2018-05-18 16:31:43 +01001315 37.0f, 38.0f, 39.0f,
1316 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001317 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001318 46.0f, 47.0f, 48.0f,
1319 49.0f, 50.0f, 51.0f,
1320 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001321 })
1322 );
1323
telsoa01c577f2c2018-08-31 09:22:23 +01001324 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00001325 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
1326
telsoa01c577f2c2018-08-31 09:22:23 +01001327 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00001328 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
1329
telsoa014fcda012018-03-09 14:13:49 +00001330 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1331
1332 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1333
1334 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1335 subTensorsSupported ?
1336 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1337 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1338
1339 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1340 subTensorsSupported ?
1341 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1342 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1343
telsoa014fcda012018-03-09 14:13:49 +00001344 armnn::MergerQueueDescriptor data;
1345 armnn::WorkloadInfo info;
1346 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1347 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001348 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1349
1350 data.m_ViewOrigins.push_back(window1);
1351 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001352
1353 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
1354
1355 inputHandle1->Allocate();
1356 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001357 outputHandle->Allocate();
1358
1359 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1360 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001361
Derek Lambertif30f7d32019-04-09 10:25:02 +01001362 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001363 workload->Execute();
1364
1365 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1366
1367 return ret;
1368}
1369
/// Element-wise addition of two [2,2,2,3] Float32 tensors with no broadcasting.
/// Returns a LayerTestResult holding both the backend's actual output and the
/// precomputed expected element-wise sum for comparison by the caller.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape (no broadcasting here).
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    // Expected result: input1 + input2, element by element.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output, then create it.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Backend memory must be allocated before any data is copied in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1461
/// Addition with two-sided broadcasting: a [1,3,2,1] operand and a [1,1,2,3]
/// operand are both broadcast to the common output shape [1,3,2,3].
/// @tparam ArmnnType  armnn::DataType of the tensors under test.
/// @tparam T          C++ element type resolved from ArmnnType.
/// @param qScale/qOffset  Quantization parameters; only applied when T is a
///                        quantized type (ignored for Float32).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Apply the same quantization parameters to all tensors so that the
    // quantized expected values below line up with the float reference data.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected result: every input1 scalar added to every input2 row.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate backend memory, copy inputs in, configure, then execute.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1540
/// Addition with single-element broadcasting: a [1,3,2,3] tensor plus a
/// [1,1,1,1] scalar tensor (0.5) broadcast across every element.
/// @tparam ArmnnType  armnn::DataType of the tensors under test.
/// @tparam T          C++ element type resolved from ArmnnType.
/// @param qScale/qOffset  Quantization parameters; only applied when T is a
///                        quantized type (ignored for Float32).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // All tensors share quantization parameters so the quantized expected
    // values match the float reference data below.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected result: input1 with 0.5 added to every element.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate backend memory, copy inputs in, configure, then execute.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001615LayerTestResult<float, 4> AdditionBroadcastTest(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001618{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001619 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1620 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001621}
1622
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001623LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1624 armnn::IWorkloadFactory& workloadFactory,
1625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001626{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001627 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1628 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001629}
1630
Sadik Armagan2999a022019-04-09 14:20:12 +01001631LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1632 armnn::IWorkloadFactory& workloadFactory,
1633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1634{
1635 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1636 workloadFactory, memoryManager, 2.f, 0);
1637}
1638
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001639LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1640 armnn::IWorkloadFactory& workloadFactory,
1641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001643 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1644 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001645}
1646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001647LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1648 armnn::IWorkloadFactory& workloadFactory,
1649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001651 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1652 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001653}
1654
Sadik Armagan2999a022019-04-09 14:20:12 +01001655LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1656 armnn::IWorkloadFactory& workloadFactory,
1657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1658{
1659 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1660 workloadFactory, memoryManager, 0.1333333f, 0);
1661}
1662
/// Cross-checks addition on the backend under test against a reference
/// backend: the same random inputs are run through both factories, and the
/// reference output is stored as the expected result.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds keep the "random" inputs reproducible between runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Handles for the backend under test.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Matching handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor/info and rebind the handles to the reference ones.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Feed identical data to both backends.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // Actual output comes from the backend under test; the expected output
    // is whatever the reference backend produced.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1732
namespace {
/// Shared driver for all division layer tests.
/// Builds two input tensors and an expected-output tensor from the raw value
/// vectors and per-tensor quantization parameters (scale/offset), runs a
/// Division workload on the given factory, and returns actual vs expected.
/// @tparam ArmnnType  armnn::DataType of all three tensors.
/// @tparam T          C++ element type resolved from ArmnnType.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters; for float runs the
    // callers pass scale 1.0 / offset 0.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Allocate backend memory, copy inputs in, configure, then execute.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1797
/// Float division at the IEEE 754 edge cases:
///   +x / +-0 -> +-infinity, -x / +-0 -> -+infinity, 0 / 0 -> NaN, x / x -> 1.
LayerTestResult<float,4> DivisionByZeroTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
                               1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
                              -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });

    // Divisors include positive and negative zero to exercise sign handling.
    std::vector<float> input1({
                              0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
                              0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });

    std::vector<float> output({
                               INFINITY,  INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
                              -INFINITY, -INFINITY,  INFINITY,  INFINITY,  1,   1,    1,    1 });

    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
                                                        memoryManager,
                                                        shape, input0, 1.0f, 0,
                                                        shape, input1, 1.0f, 0,
                                                        shape, output, 1.0f, 0);
}
1827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001828LayerTestResult<float,4> DivisionTest(
1829 armnn::IWorkloadFactory& workloadFactory,
1830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001831{
1832 const unsigned int width = 2;
1833 const unsigned int height = 2;
1834 const unsigned int channelCount = 2;
1835 const unsigned int batchSize = 2;
1836
1837 unsigned int shape[] = { batchSize, channelCount, height, width };
1838
1839 std::vector<float> input0({
1840 2, 2, 2, 2, 3, 3, 3, 3,
1841 4, 4, 4, 4, 5, 5, 5, 5 });
1842
1843 std::vector<float> input1({
1844 1, 1, 1, 1, 2, 2, 2, 2,
1845 4, 4, 4, 4, 4, 4, 4, 4 });
1846
1847 std::vector<float> output({
1848 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1849 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1850
David Beck5cd01f32018-09-12 16:00:08 +01001851
Sadik Armagan2999a022019-04-09 14:20:12 +01001852 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1853 memoryManager,
1854 shape, input0, 1.0f, 0,
1855 shape, input1, 1.0f, 0,
1856 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001857}
1858
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001859LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1860 armnn::IWorkloadFactory& workloadFactory,
1861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001862{
1863 unsigned int shape0[] = { 1, 2, 2, 2 };
1864 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1865
1866 unsigned int shape1[] = { 1, 1, 1, 1 };
1867 std::vector<float> input1({ 2 });
1868
1869 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1870
David Beck5cd01f32018-09-12 16:00:08 +01001871
Sadik Armagan2999a022019-04-09 14:20:12 +01001872 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1873 memoryManager,
1874 shape0, input0, 1.0f, 0,
1875 shape1, input1, 1.0f, 0,
1876 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001877}
1878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001879LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1880 armnn::IWorkloadFactory& workloadFactory,
1881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001882{
1883 unsigned int shape0[] = { 1, 3, 3, 2 };
1884 std::vector<float> input0({
1885 1, 4, 3, 8, 5, 12,
1886 7, 16, 9, 20, 11, 24,
1887 13, 28, 15, 32, 17, 36});
1888
1889 unsigned int shape1[] = { 1, 1, 1, 2 };
1890 std::vector<float> input1({ 1, 2 });
1891
1892 std::vector<float> output({
1893 1, 2, 3, 4, 5, 6,
1894 7, 8, 9, 10, 11, 12,
1895 13, 14, 15, 16, 17, 18});
1896
Sadik Armagan2999a022019-04-09 14:20:12 +01001897 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1898 memoryManager,
1899 shape0, input0, 1.0f, 0,
1900 shape1, input1, 1.0f, 0,
1901 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001902}
1903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001904LayerTestResult<uint8_t,4> DivisionUint8Test(
1905 armnn::IWorkloadFactory& workloadFactory,
1906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001907{
1908 const unsigned int width = 2;
1909 const unsigned int height = 2;
1910 const unsigned int channelCount = 2;
1911 const unsigned int batchSize = 2;
1912
1913 unsigned int shape[] = { batchSize, channelCount, height, width };
1914
1915 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1916 4, 4, 4, 4, 5, 5, 5, 5 });
1917
1918 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1919 4, 4, 4, 4, 4, 4, 4, 4 });
1920
1921 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1922 4, 4, 4, 4, 5, 5, 5, 5});
1923
1924
Sadik Armagan2999a022019-04-09 14:20:12 +01001925 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1926 memoryManager,
1927 shape, input0, 1.0f, 0,
1928 shape, input1, 1.0f, 0,
1929 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001930}
1931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001932LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1933 armnn::IWorkloadFactory& workloadFactory,
1934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001935{
1936 unsigned int shape0[] = { 1, 2, 2, 2 };
1937 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1938
1939 unsigned int shape1[] = { 1, 1, 1, 1 };
1940 std::vector<uint8_t> input1({ 2 });
1941
1942 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1943
Sadik Armagan2999a022019-04-09 14:20:12 +01001944 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1945 memoryManager,
1946 shape0, input0, 1.0f, 0,
1947 shape1, input1, 1.0f, 0,
1948 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001949}
1950
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001951LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1952 armnn::IWorkloadFactory& workloadFactory,
1953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001954{
1955 unsigned int shape0[] = { 1, 3, 3, 2 };
1956 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1957 7, 16, 9, 20, 11, 24,
1958 13, 28, 15, 32, 17, 36});
1959
1960 unsigned int shape1[] = { 1, 1, 1, 2 };
1961 std::vector<uint8_t> input1({ 1, 2 });
1962
1963 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1964 7, 8, 9, 10, 11, 12,
1965 13, 14, 15, 16, 17, 18});
1966
Sadik Armagan2999a022019-04-09 14:20:12 +01001967 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1968 memoryManager,
1969 shape0, input0, 1.0f, 0,
1970 shape1, input1, 1.0f, 0,
1971 shape0, output, 1.0f, 0);
1972}
1973
1974LayerTestResult<int16_t,4> DivisionInt16Test(
1975 armnn::IWorkloadFactory& workloadFactory,
1976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1977{
1978 unsigned int shape[] = { 2, 2, 2, 2 };
1979
1980 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1981 4, 4, 4, 4, 5, 5, 5, 5 });
1982
1983 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1984 4, 4, 4, 4, 4, 4, 4, 4 });
1985
1986 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1987 4, 4, 4, 4, 5, 5, 5, 5});
1988
1989
1990 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
1991 memoryManager,
1992 shape, input0, 1.0f, 0,
1993 shape, input1, 1.0f, 0,
1994 shape, output, 0.25f, 0);
1995}
1996
1997LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
1998 armnn::IWorkloadFactory& workloadFactory,
1999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2000{
2001 unsigned int shape0[] = { 1, 2, 2, 2 };
2002 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2003
2004 unsigned int shape1[] = { 1, 1, 1, 1 };
2005 std::vector<int16_t> input1({ 2 });
2006
2007 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2008
2009 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2010 memoryManager,
2011 shape0, input0, 1.0f, 0,
2012 shape1, input1, 1.0f, 0,
2013 shape0, output, 1.0f, 0);
2014}
2015
2016LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2017 armnn::IWorkloadFactory& workloadFactory,
2018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2019{
2020 unsigned int shape0[] = { 1, 3, 3, 2 };
2021 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2022 7, 16, 9, 20, 11, 24,
2023 13, 28, 15, 32, 17, 36});
2024
2025 unsigned int shape1[] = { 1, 1, 1, 2 };
2026 std::vector<int16_t> input1({ 1, 2 });
2027
2028 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2029 7, 8, 9, 10, 11, 12,
2030 13, 14, 15, 16, 17, 18});
2031
2032 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2033 memoryManager,
2034 shape0, input0, 1.0f, 0,
2035 shape1, input1, 1.0f, 0,
2036 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002037}
2038
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002039template<typename DescriptorType>
2040std::unique_ptr<armnn::IWorkload> CreateWorkload(
2041 const armnn::IWorkloadFactory& workloadFactory,
2042 const armnn::WorkloadInfo& info,
2043 const DescriptorType& descriptor)
2044{
2045 return CreateWorkload(workloadFactory, info, descriptor);
2046};
2047
2048template<>
2049std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2050 const armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::WorkloadInfo& info,
2052 const armnn::MaximumQueueDescriptor& descriptor)
2053{
2054 return workloadFactory.CreateMaximum(descriptor, info);
2055}
2056
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002057template<>
2058std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2059 const armnn::IWorkloadFactory& workloadFactory,
2060 const armnn::WorkloadInfo& info,
2061 const armnn::MinimumQueueDescriptor& descriptor)
2062{
2063 return workloadFactory.CreateMinimum(descriptor, info);
2064}
2065
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002066template<>
2067std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2068 const armnn::IWorkloadFactory& workloadFactory,
2069 const armnn::WorkloadInfo& info,
2070 const armnn::EqualQueueDescriptor& descriptor)
2071{
2072 return workloadFactory.CreateEqual(descriptor, info);
2073}
2074
FrancisMurtagh878f0232018-12-19 10:56:15 +00002075template<>
2076std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2077 const armnn::IWorkloadFactory& workloadFactory,
2078 const armnn::WorkloadInfo& info,
2079 const armnn::GreaterQueueDescriptor& descriptor)
2080{
2081 return workloadFactory.CreateGreater(descriptor, info);
2082}
2083
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002084namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002085
2086template <typename Descriptor,
2087 armnn::DataType ArmnnTypeInput,
2088 armnn::DataType ArmnnTypeOutput,
2089 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2090 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2091LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2092 armnn::IWorkloadFactory & workloadFactory,
2093 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2094 const unsigned int shape0[4], std::vector<TInput> values0,
2095 const unsigned int shape1[4], std::vector<TInput> values1,
2096 const unsigned int outShape[4], std::vector<TOutput> outValues,
2097 float qScale = 0.0f, int qOffset = 0)
2098{
2099 const size_t dimensionCount = 4;
2100 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2101 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2102 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2103
2104 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2105 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2106
2107 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002108 {
kevmay012b4d88e2019-01-24 14:05:09 +00002109 inputTensorInfo0.SetQuantizationScale(qScale);
2110 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002111
kevmay012b4d88e2019-01-24 14:05:09 +00002112 inputTensorInfo1.SetQuantizationScale(qScale);
2113 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002114
kevmay012b4d88e2019-01-24 14:05:09 +00002115 outputTensorInfo.SetQuantizationScale(qScale);
2116 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002117 }
kevmay012b4d88e2019-01-24 14:05:09 +00002118
2119 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2120
2121 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2122 {
2123 ret.compareBoolean = true;
2124 }
2125
2126 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2127 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2128 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2129
2130 Descriptor data;
2131 armnn::WorkloadInfo info;
2132 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2133 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2134 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2135 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2136
2137 inputHandle0->Allocate();
2138 inputHandle1->Allocate();
2139 outputHandle->Allocate();
2140
2141 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2142 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2143
Derek Lambertif30f7d32019-04-09 10:25:02 +01002144 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002145 ExecuteWorkload(*workload, memoryManager);
2146
2147 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2148
2149 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2150 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002151}
2152
kevmay012b4d88e2019-01-24 14:05:09 +00002153template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2154LayerTestResult<T, 4> ElementwiseTestHelper(
2155 armnn::IWorkloadFactory & workloadFactory,
2156 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2157 const unsigned int shape0[4], std::vector<T> values0,
2158 const unsigned int shape1[4], std::vector<T> values1,
2159 const unsigned int outShape[4], std::vector<T> outValues,
2160 float qScale = 0.0f, int qOffset = 0)
2161{
2162 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2163 (workloadFactory,
2164 memoryManager,
2165 shape0,
2166 values0,
2167 shape1,
2168 values1,
2169 outShape,
2170 outValues,
2171 qScale,
2172 qOffset);
2173}
2174}
2175
2176LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002178{
2179 const unsigned int width = 2;
2180 const unsigned int height = 2;
2181 const unsigned int channelCount = 2;
2182 const unsigned int batchSize = 2;
2183
2184 unsigned int shape[] = { batchSize, channelCount, height, width };
2185
2186 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2187 3, 3, 3, 3, 4, 4, 4, 4 });
2188
2189 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2190 5, 5, 5, 5, 4, 4, 4, 4 });
2191
kevmay012b4d88e2019-01-24 14:05:09 +00002192 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2193 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002194
kevmay012b4d88e2019-01-24 14:05:09 +00002195 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002196 workloadFactory,
2197 memoryManager,
2198 shape,
2199 input0,
2200 shape,
2201 input1,
2202 shape,
2203 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002204}
2205
kevmay012b4d88e2019-01-24 14:05:09 +00002206LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002207 armnn::IWorkloadFactory& workloadFactory,
2208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2209{
2210 unsigned int shape0[] = { 1, 2, 2, 2 };
2211 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2212
2213 unsigned int shape1[] = { 1, 1, 1, 1 };
2214 std::vector<float> input1({ 1 });
2215
kevmay012b4d88e2019-01-24 14:05:09 +00002216 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002217
kevmay012b4d88e2019-01-24 14:05:09 +00002218 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002219 workloadFactory,
2220 memoryManager,
2221 shape0,
2222 input0,
2223 shape1,
2224 input1,
2225 shape0,
2226 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002227}
2228
kevmay012b4d88e2019-01-24 14:05:09 +00002229LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002230 armnn::IWorkloadFactory& workloadFactory,
2231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2232{
2233 const unsigned int shape0[] = { 1, 2, 2, 3 };
2234 const unsigned int shape1[] = { 1, 1, 1, 3 };
2235
2236 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2237 7, 8, 9, 10, 11, 12 });
2238
2239 std::vector<float> input1({ 1, 2, 3});
2240
kevmay012b4d88e2019-01-24 14:05:09 +00002241 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2242 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002243
kevmay012b4d88e2019-01-24 14:05:09 +00002244 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002245 workloadFactory,
2246 memoryManager,
2247 shape0,
2248 input0,
2249 shape1,
2250 input1,
2251 shape0,
2252 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002253}
2254
2255LayerTestResult<uint8_t, 4> EqualUint8Test(
2256 armnn::IWorkloadFactory& workloadFactory,
2257 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2258{
2259 unsigned int shape[] = { 2, 2, 2, 2 };
2260
2261 // See dequantized values to the right.
2262 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002263 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002264
2265 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2266 3, 3, 3, 3, 5, 5, 5, 5 });
2267
2268 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2269 1, 1, 1, 1, 0, 0, 0, 0 });
2270
kevmay012b4d88e2019-01-24 14:05:09 +00002271 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2272 armnn::DataType::QuantisedAsymm8,
2273 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002274 workloadFactory,
2275 memoryManager,
2276 shape,
2277 input0,
2278 shape,
2279 input1,
2280 shape,
2281 output,
2282 1.0f,
2283 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002284}
2285
2286LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2287 armnn::IWorkloadFactory& workloadFactory,
2288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2289{
2290 const unsigned int shape0[] = { 1, 2, 2, 3 };
2291 const unsigned int shape1[] = { 1, 1, 1, 1 };
2292
2293 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2294 7, 8, 9, 10, 11, 12 });
2295
2296 std::vector<uint8_t> input1({ 1 });
2297
2298 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2299 0, 0, 0, 0, 0, 0 });
2300
kevmay012b4d88e2019-01-24 14:05:09 +00002301 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2302 armnn::DataType::QuantisedAsymm8,
2303 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002304 workloadFactory,
2305 memoryManager,
2306 shape0,
2307 input0,
2308 shape1,
2309 input1,
2310 shape0,
2311 output,
2312 1.0f,
2313 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002314}
2315
2316LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2317 armnn::IWorkloadFactory& workloadFactory,
2318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2319{
2320 const unsigned int shape0[] = { 1, 2, 2, 3 };
2321 const unsigned int shape1[] = { 1, 1, 1, 3 };
2322
2323 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2324 7, 8, 9, 10, 11, 12 });
2325
2326 std::vector<uint8_t> input1({ 1, 1, 3});
2327
2328 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2329 0, 0, 0, 0, 0, 0 });
2330
kevmay012b4d88e2019-01-24 14:05:09 +00002331 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2332 armnn::DataType::QuantisedAsymm8,
2333 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002334 workloadFactory,
2335 memoryManager,
2336 shape0,
2337 input0,
2338 shape1,
2339 input1,
2340 shape0,
2341 output,
2342 1.0f,
2343 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002344}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002345
kevmay012b4d88e2019-01-24 14:05:09 +00002346LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2348{
2349 const unsigned int width = 2;
2350 const unsigned int height = 2;
2351 const unsigned int channelCount = 2;
2352 const unsigned int batchSize = 2;
2353
2354 unsigned int shape[] = { batchSize, channelCount, height, width };
2355
2356 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2357 3, 3, 3, 3, 4, 4, 4, 4 });
2358
2359 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2360 5, 5, 5, 5, 4, 4, 4, 4 });
2361
kevmay012b4d88e2019-01-24 14:05:09 +00002362 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2363 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002364
kevmay012b4d88e2019-01-24 14:05:09 +00002365 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002366 workloadFactory,
2367 memoryManager,
2368 shape,
2369 input0,
2370 shape,
2371 input1,
2372 shape,
2373 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002374}
2375
kevmay012b4d88e2019-01-24 14:05:09 +00002376LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002377 armnn::IWorkloadFactory& workloadFactory,
2378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2379{
2380 unsigned int shape0[] = { 1, 2, 2, 2 };
2381 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2382
2383 unsigned int shape1[] = { 1, 1, 1, 1 };
2384 std::vector<float> input1({ 1 });
2385
kevmay012b4d88e2019-01-24 14:05:09 +00002386 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002387
kevmay012b4d88e2019-01-24 14:05:09 +00002388 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002389 workloadFactory,
2390 memoryManager,
2391 shape0,
2392 input0,
2393 shape1,
2394 input1,
2395 shape0,
2396 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002397}
2398
kevmay012b4d88e2019-01-24 14:05:09 +00002399LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002400 armnn::IWorkloadFactory& workloadFactory,
2401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2402{
2403 const unsigned int shape0[] = { 1, 2, 2, 3 };
2404 const unsigned int shape1[] = { 1, 1, 1, 3 };
2405
2406 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2407 7, 8, 9, 10, 11, 12 });
2408
2409 std::vector<float> input1({ 1, 3, 2});
2410
kevmay012b4d88e2019-01-24 14:05:09 +00002411 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2412 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002413
kevmay012b4d88e2019-01-24 14:05:09 +00002414 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002415 workloadFactory,
2416 memoryManager,
2417 shape0,
2418 input0,
2419 shape1,
2420 input1,
2421 shape0,
2422 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002423}
2424
2425LayerTestResult<uint8_t, 4> GreaterUint8Test(
2426 armnn::IWorkloadFactory& workloadFactory,
2427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2428{
2429 unsigned int shape[] = { 2, 2, 2, 2 };
2430
2431 // See dequantized values to the right.
2432 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2433 3, 3, 3, 3, 5, 5, 5, 5 });
2434
2435 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2436 2, 2, 2, 2, 5, 5, 5, 5 });
2437
2438 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2439 1, 1, 1, 1, 0, 0, 0, 0 });
2440
kevmay012b4d88e2019-01-24 14:05:09 +00002441 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2442 armnn::DataType::QuantisedAsymm8,
2443 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002444 workloadFactory,
2445 memoryManager,
2446 shape,
2447 input0,
2448 shape,
2449 input1,
2450 shape,
2451 output,
2452 1.0f,
2453 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002454}
2455
2456LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2457 armnn::IWorkloadFactory& workloadFactory,
2458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2459{
2460 const unsigned int shape0[] = { 1, 2, 2, 3 };
2461 const unsigned int shape1[] = { 1, 1, 1, 1 };
2462
2463 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2464 7, 8, 9, 10, 11, 12 });
2465
2466 std::vector<uint8_t> input1({ 1 });
2467
2468 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2469 1, 1, 1, 1, 1, 1 });
2470
kevmay012b4d88e2019-01-24 14:05:09 +00002471 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2472 armnn::DataType::QuantisedAsymm8,
2473 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002474 workloadFactory,
2475 memoryManager,
2476 shape0,
2477 input0,
2478 shape1,
2479 input1,
2480 shape0,
2481 output,
2482 1.0f,
2483 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002484}
2485
2486LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2487 armnn::IWorkloadFactory& workloadFactory,
2488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2489{
2490 const unsigned int shape0[] = { 1, 2, 2, 3 };
2491 const unsigned int shape1[] = { 1, 1, 1, 3 };
2492
2493 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2494 7, 8, 9, 10, 11, 12 });
2495
2496 std::vector<uint8_t> input1({ 1, 1, 3});
2497
2498 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2499 1, 1, 1, 1, 1, 1 });
2500
kevmay012b4d88e2019-01-24 14:05:09 +00002501 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2502 armnn::DataType::QuantisedAsymm8,
2503 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002504 workloadFactory,
2505 memoryManager,
2506 shape0,
2507 input0,
2508 shape1,
2509 input1,
2510 shape0,
2511 output,
2512 1.0f,
2513 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002514}
2515
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002516LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2518{
2519 const unsigned int width = 2;
2520 const unsigned int height = 2;
2521 const unsigned int channelCount = 2;
2522 const unsigned int batchSize = 2;
2523
2524 unsigned int shape[] = { batchSize, channelCount, height, width };
2525
2526 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2527 3, 3, 3, 3, 4, 4, 4, 4 });
2528
2529 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2530 4, 4, 4, 4, 5, 5, 5, 5 });
2531
2532 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2533 4, 4, 4, 4, 5, 5, 5, 5 });
2534
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002535 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2536 workloadFactory,
2537 memoryManager,
2538 shape,
2539 input0,
2540 shape,
2541 input1,
2542 shape,
2543 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002544}
2545
2546LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2547 armnn::IWorkloadFactory& workloadFactory,
2548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2549{
2550 unsigned int shape0[] = { 1, 2, 2, 2 };
2551 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2552
2553 unsigned int shape1[] = { 1, 1, 1, 1 };
2554 std::vector<float> input1({ 2 });
2555
2556 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002558 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2559 workloadFactory,
2560 memoryManager,
2561 shape0,
2562 input0,
2563 shape1,
2564 input1,
2565 shape0,
2566 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002567}
2568
2569LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2570 armnn::IWorkloadFactory& workloadFactory,
2571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2572{
2573 const unsigned int shape0[] = { 1, 2, 2, 3 };
2574 const unsigned int shape1[] = { 1, 1, 1, 3 };
2575
2576 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2577 7, 8, 9, 10, 11, 12 });
2578
2579 std::vector<float> input1({ 1, 2, 3});
2580
2581 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002582 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002583
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002584 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2585 workloadFactory,
2586 memoryManager,
2587 shape0,
2588 input0,
2589 shape1,
2590 input1,
2591 shape0,
2592 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002593}
2594
2595LayerTestResult<uint8_t, 4> MaximumUint8Test(
2596 armnn::IWorkloadFactory& workloadFactory,
2597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2598{
2599 unsigned int shape[] = { 2, 2, 2, 2 };
2600
2601 // See dequantized values to the right.
2602 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2603 3, 3, 3, 3, 4, 4, 4, 4 });
2604
2605 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2606 4, 4, 4, 4, 5, 5, 5, 5 });
2607
2608 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2609 4, 4, 4, 4, 5, 5, 5, 5 });
2610
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002611 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2612 workloadFactory,
2613 memoryManager,
2614 shape,
2615 input0,
2616 shape,
2617 input1,
2618 shape,
2619 output,
2620 1.0f,
2621 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002622}
2623
2624LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2625 armnn::IWorkloadFactory& workloadFactory,
2626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2627{
2628 const unsigned int shape0[] = { 1, 2, 2, 3 };
2629 const unsigned int shape1[] = { 1, 1, 1, 1 };
2630
2631 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2632 7, 8, 9, 10, 11, 12 });
2633
2634 std::vector<uint8_t> input1({2});
2635
2636 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2637 7, 8, 9, 10, 11, 12 });
2638
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002639 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2640 workloadFactory,
2641 memoryManager,
2642 shape0,
2643 input0,
2644 shape1,
2645 input1,
2646 shape0,
2647 output,
2648 1.0f,
2649 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002650}
2651
2652LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2653 armnn::IWorkloadFactory& workloadFactory,
2654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2655{
2656 const unsigned int shape0[] = { 1, 2, 2, 3 };
2657 const unsigned int shape1[] = { 1, 1, 1, 3 };
2658
2659 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2660 7, 8, 9, 10, 11, 12 });
2661
2662 std::vector<uint8_t> input1({ 1, 10, 3});
2663
2664 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2665 7, 10, 9, 10, 11, 12 });
2666
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002667 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2668 workloadFactory,
2669 memoryManager,
2670 shape0,
2671 input0,
2672 shape1,
2673 input1,
2674 shape0,
2675 output,
2676 1.0f,
2677 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002678}
2679
Sadik Armagan2999a022019-04-09 14:20:12 +01002680LayerTestResult<int16_t, 4> MaximumInt16Test(
2681 armnn::IWorkloadFactory& workloadFactory,
2682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2683{
2684 unsigned int shape[] = { 2, 2, 2, 2 };
2685
2686 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2687 3, 3, 3, 3, 4, 4, 4, 4 });
2688
2689 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2690 4, 4, 4, 4, 5, 5, 5, 5 });
2691
2692 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2693 4, 4, 4, 4, 5, 5, 5, 5 });
2694
2695 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2696 workloadFactory,
2697 memoryManager,
2698 shape,
2699 input0,
2700 shape,
2701 input1,
2702 shape,
2703 output,
2704 1.0f,
2705 0);
2706}
2707
2708LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2709 armnn::IWorkloadFactory& workloadFactory,
2710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2711{
2712 const unsigned int shape0[] = { 1, 2, 2, 3 };
2713 const unsigned int shape1[] = { 1, 1, 1, 1 };
2714
2715 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2716 7, 8, 9, 10, 11, 12 });
2717
2718 std::vector<int16_t> input1({2});
2719
2720 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2721 7, 8, 9, 10, 11, 12 });
2722
2723 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2724 workloadFactory,
2725 memoryManager,
2726 shape0,
2727 input0,
2728 shape1,
2729 input1,
2730 shape0,
2731 output,
2732 1.0f,
2733 0);
2734}
2735
2736LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2737 armnn::IWorkloadFactory& workloadFactory,
2738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2739{
2740 const unsigned int shape0[] = { 1, 2, 2, 3 };
2741 const unsigned int shape1[] = { 1, 1, 1, 3 };
2742
2743 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2744 7, 8, 9, 10, 11, 12 });
2745
2746 std::vector<int16_t> input1({ 1, 10, 3});
2747
2748 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2749 7, 10, 9, 10, 11, 12 });
2750
2751 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2752 workloadFactory,
2753 memoryManager,
2754 shape0,
2755 input0,
2756 shape1,
2757 input1,
2758 shape0,
2759 output,
2760 1.0f,
2761 0);
2762}
2763
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002764LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2765 armnn::IWorkloadFactory& workloadFactory,
2766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2767{
2768 unsigned int shape0[] = { 1, 2, 2, 2 };
2769 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2770
2771 unsigned int shape1[] = { 1, 1, 1, 1 };
2772 std::vector<float> input1({ 2 });
2773
2774 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2775
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002776 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2777 workloadFactory,
2778 memoryManager,
2779 shape0,
2780 input0,
2781 shape1,
2782 input1,
2783 shape0,
2784 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002785}
2786
2787
2788LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2789 armnn::IWorkloadFactory& workloadFactory,
2790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2791{
2792 unsigned int shape0[] = { 1, 2, 2, 2 };
2793 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2794
2795 unsigned int shape1[] = { 1, 1, 1, 1 };
2796 std::vector<float> input1({ 5 });
2797
2798 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2799
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002800 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2801 workloadFactory,
2802 memoryManager,
2803 shape0,
2804 input0,
2805 shape1,
2806 input1,
2807 shape0,
2808 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002809}
2810
2811LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2812 armnn::IWorkloadFactory & workloadFactory,
2813 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2814{
2815 const unsigned int shape0[] = { 1, 2, 2, 3 };
2816 const unsigned int shape1[] = { 1, 1, 1, 3 };
2817
2818 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2819 7, 1, 2, 3, 4, 5 });
2820
2821 std::vector<uint8_t> input1({ 1, 2, 3});
2822
2823 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2824 1, 1, 2, 1, 2, 3 });
2825
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002826 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2827 workloadFactory,
2828 memoryManager,
2829 shape0,
2830 input0,
2831 shape1,
2832 input1,
2833 shape0,
2834 output,
2835 1.0f,
2836 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002837}
2838
Sadik Armagan2999a022019-04-09 14:20:12 +01002839LayerTestResult<int16_t, 4> MinimumInt16Test(
2840 armnn::IWorkloadFactory& workloadFactory,
2841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2842{
2843 unsigned int shape[] = { 2, 2, 2, 2 };
2844
2845 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2846 3, 3, 3, 3, 4, 4, 4, 4 });
2847
2848 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2849 4, 4, 4, 4, 5, 5, 5, 5 });
2850
2851 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2852 3, 3, 3, 3, 4, 4, 4, 4 });
2853
2854 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2855 workloadFactory,
2856 memoryManager,
2857 shape,
2858 input0,
2859 shape,
2860 input1,
2861 shape,
2862 output,
2863 1.0f,
2864 0);
2865}
2866
2867LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2868 armnn::IWorkloadFactory& workloadFactory,
2869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2870{
2871 const unsigned int shape0[] = { 1, 2, 2, 3 };
2872 const unsigned int shape1[] = { 1, 1, 1, 1 };
2873
2874 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2875 7, 8, 9, 10, 11, 12 });
2876
2877 std::vector<int16_t> input1({2});
2878
2879 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2880 2, 2, 2, 2, 2, 2 });
2881
2882 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2883 workloadFactory,
2884 memoryManager,
2885 shape0,
2886 input0,
2887 shape1,
2888 input1,
2889 shape0,
2890 output,
2891 1.0f,
2892 0);
2893}
2894
2895LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2896 armnn::IWorkloadFactory& workloadFactory,
2897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2898{
2899 const unsigned int shape0[] = { 1, 2, 2, 3 };
2900 const unsigned int shape1[] = { 1, 1, 1, 3 };
2901
2902 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2903 7, 8, 9, 10, 11, 12 });
2904
2905 std::vector<int16_t> input1({ 1, 10, 3});
2906
2907 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2908 1, 8, 3, 1, 10, 3 });
2909
2910 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2911 workloadFactory,
2912 memoryManager,
2913 shape0,
2914 input0,
2915 shape1,
2916 input1,
2917 shape0,
2918 output,
2919 1.0f,
2920 0);
2921}
2922
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002923namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002924LayerTestResult<float,4> MultiplicationTestHelper(
2925 armnn::IWorkloadFactory& workloadFactory,
2926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2927 const unsigned int shape0[4],
2928 const std::vector<float> & values0,
2929 const unsigned int shape1[4],
2930 const std::vector<float> & values1,
2931 const unsigned int outShape[4],
2932 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00002933{
surmeh01bceff2f2018-03-29 16:29:27 +01002934 const size_t dimensionCount = 4;
2935 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
2936 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
2937 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00002938
surmeh01bceff2f2018-03-29 16:29:27 +01002939 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
2940 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00002941
2942 LayerTestResult<float,4> ret(outputTensorInfo);
2943
2944 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2945 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2946 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2947
2948 armnn::MultiplicationQueueDescriptor data;
2949 armnn::WorkloadInfo info;
2950 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2951 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2952 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2953
2954 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
2955
2956 inputHandle0->Allocate();
2957 inputHandle1->Allocate();
2958 outputHandle->Allocate();
2959
2960 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2961 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2962
Derek Lambertif30f7d32019-04-09 10:25:02 +01002963 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002964 workload->Execute();
2965
2966 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2967
surmeh01bceff2f2018-03-29 16:29:27 +01002968 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00002969 return ret;
2970}
surmeh01bceff2f2018-03-29 16:29:27 +01002971} // anonymous namespace
2972
2973
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002974LayerTestResult<float,4> MultiplicationTest(
2975 armnn::IWorkloadFactory& workloadFactory,
2976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002977{
2978 const unsigned int width = 2;
2979 const unsigned int height = 2;
2980 const unsigned int channelCount = 2;
2981 const unsigned int batchSize = 2;
2982
2983 unsigned int shape[] = { batchSize, channelCount, height, width };
2984
2985 std::vector<float> input0({
2986 1, 1, 1, 1, 2, 2, 2, 2,
2987 3, 3, 3, 3, 4, 4, 4, 4 });
2988
2989 std::vector<float> input1({
2990 2, 2, 2, 2, 3, 3, 3, 3,
2991 4, 4, 4, 4, 5, 5, 5, 5 });
2992
2993 std::vector<float> output({
2994 2, 2, 2, 2, 6, 6, 6, 6,
2995 12, 12, 12, 12, 20, 20, 20, 20 });
2996
2997 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002998 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002999 shape,
3000 input0,
3001 shape,
3002 input1,
3003 shape,
3004 output);
3005}
3006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003007LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3008 armnn::IWorkloadFactory& workloadFactory,
3009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003010{
3011 unsigned int shape0[] = { 1, 2, 2, 2 };
3012 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3013
3014 unsigned int shape1[] = { 1, 1, 1, 1 };
3015 std::vector<float> input1({ 2 });
3016
3017 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3018
3019 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003020 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003021 shape0,
3022 input0,
3023 shape1,
3024 input1,
3025 shape0,
3026 output);
3027}
3028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003029LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3030 armnn::IWorkloadFactory& workloadFactory,
3031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003032{
3033 unsigned int shape0[] = { 1, 3, 3, 2 };
3034 std::vector<float> input0({
3035 1, 2, 3, 4, 5, 6,
3036 7, 8, 9, 10, 11, 12,
3037 13, 14, 15, 16, 17, 18});
3038
3039 unsigned int shape1[] = { 1, 1, 1, 2 };
3040 std::vector<float> input1({ 1, 2 });
3041
3042 std::vector<float> output({
3043 1, 4, 3, 8, 5, 12,
3044 7, 16, 9, 20, 11, 24,
3045 13, 28, 15, 32, 17, 36});
3046
3047 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003048 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003049 shape0,
3050 input0,
3051 shape1,
3052 input1,
3053 shape0,
3054 output);
3055}
telsoa014fcda012018-03-09 14:13:49 +00003056
// Runs the same Multiplication workload, with identical random inputs, on two
// factories (the backend under test and a reference backend) and returns both
// outputs so the caller can compare them: `output` holds the backend-under-test
// result, `outputExpected` the reference result.
// NOTE: memoryManager is not used in this function; it is kept for signature
// consistency with the other layer tests.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs (and so the comparison) deterministic.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload shares the descriptor but is rebound to its own handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume byte-identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3126
// Runs the same BatchNormalization workload, with identical random inputs and
// parameters, on two factories (backend under test and reference backend) and
// returns both outputs for comparison: `output` holds the backend-under-test
// result, `outputExpected` the reference result.
// NOTE: memoryManager is not used in this function; it is kept for signature
// consistency with the other layer tests.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;       // Per-channel parameter tensors (mean/variance/beta/gamma).

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the random data deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // Variance generated with a 0.0f lower bound (it must not be negative).
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares the descriptor (including the parameter
    // tensors above) but is rebound to its own input/output handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume byte-identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3209
// Runs a Permute workload applying `mappings` to `inputData`.
// On return, `outputData` holds the permuted elements and `inputTensorInfo`
// (in/out parameter) is overwritten with the permuted tensor's info.
// NOTE: memoryManager is not used in this function; it is kept for signature
// consistency with the other helpers in this file.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted shape back to the caller through the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
3253
3254armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
3255 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3256 unsigned int concatDim)
3257{
telsoa014fcda012018-03-09 14:13:49 +00003258 std::vector<armnn::TensorShape> shapes;
3259 shapes.reserve(inputTensorInfos.size());
3260 for (const armnn::TensorInfo& it: inputTensorInfos)
3261 {
3262 shapes.push_back(it.GetShape());
3263 }
surmeh013537c2c2018-05-18 16:31:43 +01003264
3265 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
3266 shapes.end(),
3267 concatDim);
3268}
3269
3270//
// Concatenation is only supported for the N and C dimensions in NCHW and for the innermost dimension.
// For tensors with fewer than 4 dimensions, we need to make sure that the concat dimension is at
// least the 3rd slowest iterating one, or the innermost dimension.
surmeh013537c2c2018-05-18 16:31:43 +01003274//
3275
// Returns true when concatenating along concatDim is not directly supported
// and the inputs must first be permuted (see the note above this function).
// All inputs are expected to have the same number of dimensions.
bool NeedPermuteForConcat(
        const std::vector<armnn::TensorInfo> & inputTensorInfos,
        unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Permute when there are fewer than 3 dimensions, or for 3D tensors when
    // the concat axis is neither the slowest iterating one nor the innermost.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
3301
3302armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3303{
3304 unsigned int numDims = inputShape.GetNumDimensions();
3305 if (numDims >= 3)
3306 {
3307 // Nothing to do if the inputShape has at least 3 dimensions.
3308 return inputShape;
3309 }
3310
3311 std::vector<unsigned int> newDims(size_t(3), 1u);
3312 unsigned int expandedBy = 3 - numDims;
3313 for (unsigned int i=0; i<numDims; ++i)
3314 {
3315 newDims[expandedBy+i] = inputShape[i];
3316 }
3317 return armnn::TensorShape(3u, &newDims[0]);
3318}
3319
3320void Generate3dPermuteVectorForConcat(
3321 unsigned int numDimensions,
3322 unsigned int & concatDim,
3323 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3324{
3325 BOOST_ASSERT_MSG(numDimensions <= 3,
3326 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003327 unsigned int expandedBy = 3 - numDimensions;
3328 unsigned int expandedConcatAxis = concatDim + expandedBy;
3329
3330 if (expandedConcatAxis == 2)
3331 {
3332 concatDim = 0;
3333 armnn::PermutationVector forwardPermutation({1, 2, 0});
3334 armnn::PermutationVector reversePermutation({2, 0, 1});
3335 permutations = std::make_pair(forwardPermutation, reversePermutation);
3336 }
3337 else if (expandedConcatAxis == 1)
3338 {
3339 concatDim = 0;
3340 armnn::PermutationVector forwardPermutation({2, 0, 1});
3341 armnn::PermutationVector reversePermutation({1, 2, 0});
3342 permutations = std::make_pair(forwardPermutation, reversePermutation);
3343 }
3344 else
3345 {
3346 BOOST_ASSERT(expandedConcatAxis == 0);
3347 concatDim = 0;
3348 }
3349}
3350
3351//
3352// Permute the input tensors so we can do a supported concatenation.
3353// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3354// at the front. Finally this function tells what the output shape
3355// of the permuted concatenated tensor is going to be.
3356//
// Permutes every input tensor (expanding to 3D first when necessary) so the
// concatenation can be performed along a supported axis.
// Out-parameters on return:
//   inputTensorInfos/inputData - replaced by the permuted infos and data
//                                (inputDataStorage owns the permuted buffers),
//   permuteVector              - the reverse permutation to undo this later,
//   concatDim                  - the axis to concatenate along post-permute,
//   outputTensorInfo           - reshaped to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the forward/reverse permutations once; all
            // inputs must share the same rank so they reuse these.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad the shape out to 3D before permuting.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Point the caller's views at the permuted data/info.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3419
3420
3421//
3422// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003423// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003424// output.
3425//
3426template <typename T>
3427void PermuteOutputForConcat(
3428 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003430 const armnn::TensorInfo & tensorInfo,
3431 const armnn::PermutationVector & permuteVector,
3432 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
3433 T * data)
3434{
3435 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
3436 if (data == nullptr)
3437 {
3438 // Nullptr is an error in the test. By returning without doing the permutation
3439 // I expect the caller to fail the test. It still makes sense to report this as
3440 // an assert for Debug builds.
3441 return;
3442 }
3443
3444 armnn::TensorInfo resultTensorInfo = tensorInfo;
3445 std::vector<T> inputData(tensorInfo.GetNumElements());
3446 std::vector<T> outputData;
3447
3448 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
3449
3450 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003451 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003452 permuteVector,
3453 resultTensorInfo,
3454 &inputData[0],
3455 outputData);
3456
3457 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
3458}
3459
3460template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003461void Concatenate(
3462 armnn::IWorkloadFactory& workloadFactory,
3463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3464 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3465 std::initializer_list<T *> inputsOrig,
3466 const armnn::TensorInfo& outputTensorInfoOrig,
3467 T * output,
narpra015cdda352018-11-19 15:30:27 +00003468 unsigned int concatDim,
3469 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003470{
3471 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3472 if (output == nullptr)
3473 {
3474 // Nullptr is an error in the test. By returning without doing the permutation
3475 // I expect the caller to fail the test. It still makes sense to report this as
3476 // an assert for Debug builds.
3477 return;
3478 }
3479
telsoa01c577f2c2018-08-31 09:22:23 +01003480 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003481 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3482 std::vector<T *> inputs = inputsOrig;
3483 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3484
3485 armnn::PermutationVector permuteVector{0, 1, 2};
3486
telsoa01c577f2c2018-08-31 09:22:23 +01003487 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003488 std::vector<std::vector<T>> tmpInputDataStorage;
3489
3490 const size_t inputCount = inputTensorInfos.size();
3491
3492 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3493
3494 if (needPermuteForConcat)
3495 {
3496 //
3497 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003498 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003499 //
3500 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003501 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003502 inputTensorInfos,
3503 inputs,
3504 tmpInputDataStorage,
3505 permuteVector,
3506 concatDim,
3507 outputTensorInfo);
3508 }
3509
narpra015cdda352018-11-19 15:30:27 +00003510 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003511
3512 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3513 inputHandles.reserve(inputCount);
3514
narpra015cdda352018-11-19 15:30:27 +00003515 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3516
3517 armnn::MergerQueueDescriptor queueDescriptor;
3518 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
3519 queueDescriptor.m_Parameters = viewsDescriptor;
3520
3521 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003522 {
narpra015cdda352018-11-19 15:30:27 +00003523 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3524 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3525 {
3526 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3527 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3528 }
telsoa014fcda012018-03-09 14:13:49 +00003529
narpra015cdda352018-11-19 15:30:27 +00003530 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003531
narpra015cdda352018-11-19 15:30:27 +00003532 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3533 for (unsigned int i = 0; i < inputCount; ++i)
3534 {
3535 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3536 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3537 subTensorsSupported ?
3538 workloadFactory.CreateSubTensorHandle(*outputHandle,
3539 inputTensorInfo.GetShape(),
3540 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3541 workloadFactory.CreateTensorHandle(inputTensorInfo);
3542
3543 inputHandles.emplace_back(std::move(inputHandle));
3544 }
3545
telsoa014fcda012018-03-09 14:13:49 +00003546 }
narpra015cdda352018-11-19 15:30:27 +00003547 else
3548 {
3549 for (unsigned int i = 0; i < inputCount; ++i)
3550 {
3551 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3552 inputHandles.emplace_back(std::move(inputHandle));
3553 }
3554 }
telsoa014fcda012018-03-09 14:13:49 +00003555
3556 for (unsigned int i = 0; i < inputCount; ++i)
3557 {
surmeh013537c2c2018-05-18 16:31:43 +01003558 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003559 }
3560
3561 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3562
3563 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
3564
3565 for (auto& inputHandle : inputHandles)
3566 {
3567 inputHandle->Allocate();
3568 }
3569
3570 outputHandle->Allocate();
3571
3572 unsigned int nextInputId = 0;
3573 for (auto& inputHandle : inputHandles)
3574 {
surmeh013537c2c2018-05-18 16:31:43 +01003575 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3576 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003577 }
3578
Derek Lambertif30f7d32019-04-09 10:25:02 +01003579 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003580 workload->Execute();
3581
surmeh013537c2c2018-05-18 16:31:43 +01003582 if (needPermuteForConcat)
3583 {
3584 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003585 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003586 outputTensorInfo,
3587 permuteVector,
3588 std::move(outputHandle),
3589 output);
3590 }
3591 else
3592 {
3593 CopyDataFromITensorHandle(output, outputHandle.get());
3594 }
telsoa014fcda012018-03-09 14:13:49 +00003595}
3596
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003597template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003598LayerTestResult<T, 1> Concatenation1dTestImpl(
3599 armnn::IWorkloadFactory& workloadFactory,
3600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3601 float qScale,
3602 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003603{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003604 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003605
3606 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3607 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3608 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3609
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003610 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00003611
3612 LayerTestResult<T, 1> result(outputTensorInfo);
3613
3614 std::vector<T> output;
3615 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003616 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003617 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3618 { input0.data(), input1.data(), input2.data() },
3619 outputTensorInfo,
3620 output.data(),
3621 0,
3622 true);
telsoa014fcda012018-03-09 14:13:49 +00003623
3624 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3625 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3626 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3627 }));
3628
3629 return result;
3630}
3631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003632LayerTestResult<float, 1> Concatenation1dTest(
3633 armnn::IWorkloadFactory& workloadFactory,
3634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003636 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003637}
3638
// Shared driver for the 2D concatenation tests: builds three fixed 2x3 inputs,
// concatenates them along `dimension` into `outputTensorInfo`, and returns the
// actual output (callers fill in outputExpected for their dimension).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    // All three inputs share the same 2x3 shape.
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Final `true` enables the sub-tensor path in the Concatenate helper.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
3689
// Concatenates three 2x3 inputs along dimension 0 into a 6x3 output and
// checks the inputs appear stacked batch-wise, in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected: input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3724
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003725LayerTestResult<float, 2> Concatenation2dDim0Test(
3726 armnn::IWorkloadFactory& workloadFactory,
3727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003728{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003729 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003730}
3731
// Concatenates three 2x3 inputs along dimension 1 into a 2x9 output and
// checks each batch row is the three input rows laid side by side.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
3754
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003755LayerTestResult<float, 2> Concatenation2dDim1Test(
3756 armnn::IWorkloadFactory& workloadFactory,
3757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003758{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003759 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003760}
3761
// Concatenates three inputs of DIFFERENT batch counts (2x3, 3x3, 1x3) along
// dimension 0 into a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 0; final `true` enables the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // Expected: all batches of input0, then input1, then input2, in order.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3832
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003833LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3834 armnn::IWorkloadFactory& workloadFactory,
3835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003836{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003837 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3838 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003839}
3840
// Concatenates three inputs of DIFFERENT widths (2x3, 2x5, 2x1) along
// dimension 1 into a 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1; final `true` enables the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // Expected: per batch, the rows of input0, input1, input2 laid side by side.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003900LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3901 armnn::IWorkloadFactory& workloadFactory,
3902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003903{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003904 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3905 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003906}
3907
// Shared driver for the 3D concatenation tests: builds three fixed 2x3x2
// inputs, concatenates them along `dimension` into `outputTensorInfo`, and
// returns the actual output (callers fill in outputExpected).
// `useSubtensor` is forwarded to the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same 2x3x2 shape.
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3995
// Concatenates three 2x3x2 inputs along dimension 0 into a 6x3x2 output and
// checks the inputs appear stacked batch-wise, in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4066
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004067LayerTestResult<float, 3> Concatenation3dDim0Test(
4068 armnn::IWorkloadFactory& workloadFactory,
4069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004070{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004071 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004072}
4073
// Concatenates three 2x3x2 inputs along dimension 1 into a 2x9x2 output and
// checks each batch contains the three inputs' channels in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4144
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004145LayerTestResult<float, 3> Concatenation3dDim1Test(
4146 armnn::IWorkloadFactory& workloadFactory,
4147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004148{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004149 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004150}
4151
// Concatenates three 2x3x2 inputs along dimension 2 into a 2x3x6 output and
// checks each (batch, channel) row interleaves the inputs' element pairs.
// `useSubtensor` is forwarded to the shared 3D driver.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4187
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004188LayerTestResult<float, 3> Concatenation3dDim2Test(
4189 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4191 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004193 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4194 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004195}
4196
// Concatenates three inputs of DIFFERENT batch counts (2x3x2, 1x3x2, 3x3x2)
// along dimension 0 into a 6x3x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 0; final `true` enables the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected: all batches of input0, then input1, then input2, in order.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004340LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4341 armnn::IWorkloadFactory& workloadFactory,
4342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004343{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004344 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4345 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004346}
4347
// Concatenates three inputs of DIFFERENT channel counts (2x3x2, 2x4x2, 2x1x2)
// along dimension 1 into a 2x8x2 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1; final `true` enables the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected: per batch, the channels of input0, input1, input2 in order.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
4478
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004479LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4480 armnn::IWorkloadFactory& workloadFactory,
4481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004483 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4484 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004485}
4486
// Concatenates three inputs of DIFFERENT innermost widths (2x3x2, 2x3x1,
// 2x3x3) along dimension 2 into a 2x3x6 output.
// `useSubtensor` is forwarded to the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2 (the innermost axis).
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected: per (batch, channel), input0's pair, input1's single value,
    // then input2's triple.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
4594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004595LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4596 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4598 bool useSubtensor)
4599{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004600 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4601 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004602}
4603
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004604template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004605LayerTestResult<T, 4> Concatenation4dTestImpl(
4606 armnn::IWorkloadFactory& workloadFactory,
4607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4608 const armnn::TensorInfo& outputTensorInfo,
4609 unsigned int dimension,
4610 bool useSubtensor,
4611 float qScale,
4612 int32_t qOffset)
4613{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004614 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004615
4616 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4617 1.0f, 2.0f,
4618 3.0f, 4.0f,
4619 5.0f, 6.0f,
4620 7.0f, 8.0f,
4621 9.0f, 10.0f,
4622 11.0f, 12.0f
4623 }));
4624
4625 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4626 11.0f, 12.0f,
4627 13.0f, 14.0f,
4628 15.0f, 16.0f,
4629 17.0f, 18.0f,
4630 19.0f, 20.0f,
4631 21.0f, 22.0f
4632 }));
4633
4634 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4635 21.0f, 22.0f,
4636 23.0f, 24.0f,
4637 25.0f, 26.0f,
4638 27.0f, 28.0f,
4639 29.0f, 30.0f,
4640 31.0f, 32.0f
4641 }));
4642
4643 LayerTestResult<T, 4> result(outputTensorInfo);
4644
4645 std::vector<T> output;
4646 output.resize(outputTensorInfo.GetNumElements());
4647
4648 Concatenate<T>(workloadFactory,
4649 memoryManager,
4650 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4651 {input0.data(), input1.data(), input2.data()},
4652 outputTensorInfo,
4653 output.data(),
4654 dimension,
4655 useSubtensor);
4656
4657 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4658 return result;
4659}
4660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004661template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004662LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4663 armnn::IWorkloadFactory& workloadFactory,
4664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4665 float qScale,
4666 int32_t qOffset)
4667{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004668 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004669
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004670 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4671 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4672
narpra015cdda352018-11-19 15:30:27 +00004673 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4674 1.0f, 2.0f,
4675 3.0f, 4.0f,
4676 5.0f, 6.0f,
4677 7.0f, 8.0f,
4678 9.0f, 10.0f,
4679 11.0f, 12.0f,
4680
4681 11.0f, 12.0f,
4682 13.0f, 14.0f,
4683 15.0f, 16.0f,
4684 17.0f, 18.0f,
4685 19.0f, 20.0f,
4686 21.0f, 22.0f,
4687
4688 21.0f, 22.0f,
4689 23.0f, 24.0f,
4690 25.0f, 26.0f,
4691 27.0f, 28.0f,
4692 29.0f, 30.0f,
4693 31.0f, 32.0f
4694 }));
4695 return result;
4696}
4697
4698LayerTestResult<float, 4> Concatenation4dDim0Test(
4699 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004701{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004702 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004703}
4704
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004705template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004706LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
4707 armnn::IWorkloadFactory& workloadFactory,
4708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4709 float qScale,
4710 int32_t qOffset)
4711{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004712 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004713
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004714 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4715 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
4716
narpra015cdda352018-11-19 15:30:27 +00004717 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4718 1.0f, 2.0f,
4719 3.0f, 4.0f,
4720 5.0f, 6.0f,
4721 7.0f, 8.0f,
4722 9.0f, 10.0f,
4723 11.0f, 12.0f,
4724
4725 11.0f, 12.0f,
4726 13.0f, 14.0f,
4727 15.0f, 16.0f,
4728 17.0f, 18.0f,
4729 19.0f, 20.0f,
4730 21.0f, 22.0f,
4731
4732 21.0f, 22.0f,
4733 23.0f, 24.0f,
4734 25.0f, 26.0f,
4735 27.0f, 28.0f,
4736 29.0f, 30.0f,
4737 31.0f, 32.0f
4738 }));
4739
4740 return result;
4741}
4742
4743LayerTestResult<float, 4> Concatenation4dDim1Test(
4744 armnn::IWorkloadFactory& workloadFactory,
4745 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4746{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004747 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004748}
4749
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004750template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004751LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
4752 armnn::IWorkloadFactory& workloadFactory,
4753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4754 float qScale,
4755 int32_t qOffset)
4756{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004757 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004758
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004759 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4760 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
4761
narpra015cdda352018-11-19 15:30:27 +00004762 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4763 1.0f, 2.0f,
4764 3.0f, 4.0f,
4765 11.0f, 12.0f,
4766 13.0f, 14.0f,
4767 21.0f, 22.0f,
4768 23.0f, 24.0f,
4769
4770 5.0f, 6.0f,
4771 7.0f, 8.0f,
4772 15.0f, 16.0f,
4773 17.0f, 18.0f,
4774 25.0f, 26.0f,
4775 27.0f, 28.0f,
4776
4777 9.0f, 10.0f,
4778 11.0f, 12.0f,
4779 19.0f, 20.0f,
4780 21.0f, 22.0f,
4781 29.0f, 30.0f,
4782 31.0f, 32.0f
4783 }));
4784
4785 return result;
4786}
4787
4788LayerTestResult<float, 4> Concatenation4dDim2Test(
4789 armnn::IWorkloadFactory& workloadFactory,
4790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4791{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004792 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004793}
4794
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004795template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004796LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
4797 armnn::IWorkloadFactory& workloadFactory,
4798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4799 float qScale,
4800 int32_t qOffset,
4801 bool useSubtensor)
4802{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004803 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004805 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4806 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
4807
narpra015cdda352018-11-19 15:30:27 +00004808 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4809 1.0f, 2.0f,
4810 11.0f, 12.0f,
4811 21.0f, 22.0f,
4812 3.0f, 4.0f,
4813 13.0f, 14.0f,
4814 23.0f, 24.0f,
4815
4816 5.0f, 6.0f,
4817 15.0f, 16.0f,
4818 25.0f, 26.0f,
4819 7.0f, 8.0f,
4820 17.0f, 18.0f,
4821 27.0f, 28.0f,
4822
4823 9.0f, 10.0f,
4824 19.0f, 20.0f,
4825 29.0f, 30.0f,
4826 11.0f, 12.0f,
4827 21.0f, 22.0f,
4828 31.0f, 32.0f
4829 }));
4830
4831 return result;
4832}
4833
4834LayerTestResult<float, 4> Concatenation4dDim3Test(
4835 armnn::IWorkloadFactory& workloadFactory,
4836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4837 bool useSubtensor)
4838{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004839 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4840 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004841}
4842
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004843template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004844LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
4845 armnn::IWorkloadFactory& workloadFactory,
4846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4847 float qScale,
4848 int32_t qOffset)
4849{
4850 unsigned int dimension = 0;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004851 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004852
4853 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4854 1.0f, 2.0f,
4855 3.0f, 4.0f,
4856 5.0f, 6.0f,
4857 7.0f, 8.0f,
4858 9.0f, 10.0f,
4859 11.0f, 12.0f
4860 }));
4861
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004862 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004863
4864 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4865 11.0f, 12.0f,
4866 13.0f, 14.0f,
4867 15.0f, 16.0f,
4868 17.0f, 18.0f,
4869 19.0f, 20.0f,
4870 21.0f, 22.0f,
4871
4872 21.0f, 22.0f,
4873 23.0f, 24.0f,
4874 25.0f, 26.0f,
4875 27.0f, 28.0f,
4876 29.0f, 30.0f,
4877 31.0f, 32.0f
4878
4879 }));
4880
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004881 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004882
4883 LayerTestResult<T, 4> result(outputTensorInfo);
4884
4885 std::vector<T> output;
4886 output.resize(outputTensorInfo.GetNumElements());
4887 Concatenate<T>(workloadFactory,
4888 memoryManager,
4889 {inputTensorInfo0, inputTensorInfo1},
4890 {input0.data(), input1.data()},
4891 outputTensorInfo,
4892 output.data(),
4893 dimension,
4894 true);
4895
4896 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4897 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4898 1.0f, 2.0f,
4899 3.0f, 4.0f,
4900 5.0f, 6.0f,
4901 7.0f, 8.0f,
4902 9.0f, 10.0f,
4903 11.0f, 12.0f,
4904
4905 11.0f, 12.0f,
4906 13.0f, 14.0f,
4907 15.0f, 16.0f,
4908 17.0f, 18.0f,
4909 19.0f, 20.0f,
4910 21.0f, 22.0f,
4911
4912 21.0f, 22.0f,
4913 23.0f, 24.0f,
4914 25.0f, 26.0f,
4915 27.0f, 28.0f,
4916 29.0f, 30.0f,
4917 31.0f, 32.0f
4918 }));
4919
4920 return result;
4921}
4922
4923LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4924 armnn::IWorkloadFactory& workloadFactory,
4925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4926{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004927 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4928 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004929}
4930
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004931template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004932LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4933 armnn::IWorkloadFactory& workloadFactory,
4934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4935 float qScale,
4936 int32_t qOffset)
4937{
4938 unsigned int dimension = 1;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004939 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004940
4941 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4942 1.0f, 2.0f,
4943 3.0f, 4.0f,
4944 5.0f, 6.0f,
4945 7.0f, 8.0f,
4946 9.0f, 10.0f,
4947 11.0f, 12.0f
4948 }));
4949
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004950 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004951
4952 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4953 11.0f, 12.0f,
4954 13.0f, 14.0f,
4955 15.0f, 16.0f,
4956 17.0f, 18.0f,
4957
4958 }));
4959
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004960 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00004961
4962 LayerTestResult<T, 4> result(outputTensorInfo);
4963
4964 std::vector<T> output;
4965 output.resize(outputTensorInfo.GetNumElements());
4966 Concatenate<T>(workloadFactory,
4967 memoryManager,
4968 {inputTensorInfo0, inputTensorInfo1},
4969 {input0.data(), input1.data()},
4970 outputTensorInfo,
4971 output.data(),
4972 dimension,
4973 true);
4974
4975 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4976 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4977 1.0f, 2.0f,
4978 3.0f, 4.0f,
4979 5.0f, 6.0f,
4980 7.0f, 8.0f,
4981 9.0f, 10.0f,
4982 11.0f, 12.0f,
4983 11.0f, 12.0f,
4984 13.0f, 14.0f,
4985 15.0f, 16.0f,
4986 17.0f, 18.0f
4987 }));
4988
4989 return result;
4990}
4991
4992LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4993 armnn::IWorkloadFactory& workloadFactory,
4994 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4995{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004996 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
4997 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004998}
4999
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005000template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005001LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
5002 armnn::IWorkloadFactory& workloadFactory,
5003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5004 float qScale,
5005 int32_t qOffset)
5006{
5007 unsigned int dimension = 2;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005008 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005009
5010 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5011 1.0f, 2.0f,
5012 3.0f, 4.0f,
5013 5.0f, 6.0f,
5014 7.0f, 8.0f,
5015 9.0f, 10.0f,
5016 11.0f, 12.0f
5017 }));
5018
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005019 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005020
5021 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5022 11.0f, 12.0f,
5023 13.0f, 14.0f,
5024 15.0f, 16.0f,
5025 17.0f, 18.0f,
5026 19.0f, 20.0f,
5027 21.0f, 22.0f,
5028 23.0f, 24.0f,
5029 25.0f, 26.0f,
5030 27.0f, 28.0f
5031 }));
5032
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005033 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005034
5035 LayerTestResult<T, 4> result(outputTensorInfo);
5036
5037 std::vector<T> output;
5038 output.resize(outputTensorInfo.GetNumElements());
5039 Concatenate<T>(workloadFactory,
5040 memoryManager,
5041 {inputTensorInfo0, inputTensorInfo1},
5042 {input0.data(), input1.data()},
5043 outputTensorInfo,
5044 output.data(),
5045 dimension,
5046 true);
5047
5048 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5049 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5050 1.0f, 2.0f,
5051 3.0f, 4.0f,
5052 11.0f, 12.0f,
5053 13.0f, 14.0f,
5054 15.0f, 16.0f,
5055
5056 5.0f, 6.0f,
5057 7.0f, 8.0f,
5058 17.0f, 18.0f,
5059 19.0f, 20.0f,
5060 21.0f, 22.0f,
5061
5062 9.0f, 10.0f,
5063 11.0f, 12.0f,
5064 23.0f, 24.0f,
5065 25.0f, 26.0f,
5066 27.0f, 28.0f
5067 }));
5068
5069 return result;
5070}
5071
5072LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5073 armnn::IWorkloadFactory& workloadFactory,
5074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5075{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005076 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5077 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005078}
5079
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005080template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005081LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
5082 armnn::IWorkloadFactory& workloadFactory,
5083 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5084 float qScale,
5085 int32_t qOffset,
5086 bool useSubtensor)
5087{
5088 unsigned int dimension = 3;
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005089 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005090
5091 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5092 1.0f, 2.0f,
5093 3.0f, 4.0f,
5094 5.0f, 6.0f,
5095 7.0f, 8.0f,
5096 9.0f, 10.0f,
5097 11.0f, 12.0f
5098 }));
5099
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005100 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005101
5102 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5103 11.0f, 12.0f, 13.0f,
5104 14.0f, 15.0f, 16.0f,
5105
5106 17.0f, 18.0f, 19.0f,
5107 20.0f, 21.0f, 22.0f,
5108
5109 23.0f, 24.0f, 25.0f,
5110 26.0f, 27.0f, 28.0f
5111 }));
5112
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005113 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
narpra015cdda352018-11-19 15:30:27 +00005114
5115 LayerTestResult<T, 4> result(outputTensorInfo);
5116
5117 std::vector<T> output;
5118 output.resize(outputTensorInfo.GetNumElements());
5119 Concatenate<T>(workloadFactory,
5120 memoryManager,
5121 {inputTensorInfo0, inputTensorInfo1},
5122 {input0.data(), input1.data()},
5123 outputTensorInfo,
5124 output.data(),
5125 dimension,
5126 useSubtensor);
5127
5128 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5129 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5130 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
5131 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
5132 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
5133 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
5134 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
5135 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
5136 }));
5137
5138 return result;
5139}
5140
5141LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5142 armnn::IWorkloadFactory& workloadFactory,
5143 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5144 bool useSubtensor)
5145{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005146 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5147 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005148}
5149
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005150LayerTestResult<float, 4> ResizeBilinearNopTest(
5151 armnn::IWorkloadFactory& workloadFactory,
5152 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005153 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005154{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005155 const armnn::TensorInfo inputTensorInfo =
5156 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5157
5158 const armnn::TensorInfo outputTensorInfo =
5159 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005160
James Conroy6b965822018-11-01 11:33:09 +00005161 std::vector<float> inputData({
5162 1.0f, 2.0f, 3.0f, 4.0f,
5163 2.0f, 3.0f, 4.0f, 5.0f,
5164 3.0f, 4.0f, 5.0f, 6.0f,
5165 4.0f, 5.0f, 6.0f, 7.0f,
5166
telsoa014fcda012018-03-09 14:13:49 +00005167 1.0f, 2.0f, 3.0f, 4.0f,
5168 2.0f, 3.0f, 4.0f, 5.0f,
5169 3.0f, 4.0f, 5.0f, 6.0f,
5170 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00005171 });
5172
5173 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005174 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005175 {
5176 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005177 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005178 inputData = tmp;
5179 }
5180
5181 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005182
5183 LayerTestResult<float, 4> result(outputTensorInfo);
5184 result.outputExpected = input;
5185
5186 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5187 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5188
5189 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005190 descriptor.m_Parameters.m_DataLayout = dataLayout;
5191 armnn::WorkloadInfo info;
5192 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5193 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5194
5195 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5196
5197 inputHandle->Allocate();
5198 outputHandle->Allocate();
5199 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5200
Derek Lambertif30f7d32019-04-09 10:25:02 +01005201 workload->PostAllocationConfigure();
James Conroy074f3712018-10-03 09:32:03 +01005202 workload->Execute();
5203
5204 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5205 return result;
5206}
5207
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005208LayerTestResult<float, 4> SimpleResizeBilinearTest(
5209 armnn::IWorkloadFactory& workloadFactory,
5210 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005211 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01005212{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005213 const armnn::TensorInfo inputTensorInfo =
5214 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
5215
5216 const armnn::TensorInfo outputTensorInfo =
5217 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01005218
James Conroy6b965822018-11-01 11:33:09 +00005219 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005220 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00005221 200.0f, 250.0f,
5222
5223 250.0f, 200.0f,
5224 250.0f, 1.0f
5225 });
James Conroy074f3712018-10-03 09:32:03 +01005226
5227 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5228 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00005229 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
5230 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
5231 // which we would expect if projecting the centre).
5232
5233 std::vector<float> outputData({
5234 1.0f,
5235
5236 250.0f
5237 });
5238
5239 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005240 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005241 {
5242 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005243 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005244 inputData = tmp;
5245
5246 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005247 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005248 outputData = tmp1;
5249 }
5250
5251 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5252
James Conroy074f3712018-10-03 09:32:03 +01005253 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005254 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01005255
5256 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5257 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5258
5259 armnn::ResizeBilinearQueueDescriptor descriptor;
5260 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005261 armnn::WorkloadInfo info;
5262 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5263 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5264
5265 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5266
5267 inputHandle->Allocate();
5268 outputHandle->Allocate();
5269 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5270
Derek Lambertif30f7d32019-04-09 10:25:02 +01005271 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005272 workload->Execute();
5273
5274 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5275 return result;
5276}
5277
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005278LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5279 armnn::IWorkloadFactory& workloadFactory,
5280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005281 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005282{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005283 const armnn::TensorInfo inputTensorInfo =
5284 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5285
5286 const armnn::TensorInfo outputTensorInfo =
5287 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005288
James Conroy6b965822018-11-01 11:33:09 +00005289 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005290 1.0f, 2.0f, 3.0f, 4.0f,
5291 2.0f, 3.0f, 4.0f, 5.0f,
5292 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005293 4.0f, 5.0f, 6.0f, 7.0f,
5294
5295 7.0f, 6.0f, 5.0f, 4.0f,
5296 6.0f, 5.0f, 4.0f, 3.0f,
5297 5.0f, 4.0f, 3.0f, 2.0f,
5298 4.0f, 3.0f, 2.0f, 1.0f
5299 });
5300
5301 std::vector<float> outputData({
5302 1.0f, 3.0f,
5303 3.0f, 5.0f,
5304
5305 7.0f, 5.0f,
5306 5.0f, 3.0f
5307 });
5308
5309 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005310 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005311 {
5312 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005313 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005314 inputData = tmp;
5315
5316 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005317 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005318 outputData = tmp1;
5319 }
5320
5321 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005322
telsoa014fcda012018-03-09 14:13:49 +00005323 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005324 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005325
5326 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5327 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5328
5329 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005330 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005331 armnn::WorkloadInfo info;
5332 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5333 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5334
5335 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5336
5337 inputHandle->Allocate();
5338 outputHandle->Allocate();
5339 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5340
Derek Lambertif30f7d32019-04-09 10:25:02 +01005341 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005342 workload->Execute();
5343
5344 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5345 return result;
5346}
5347
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005348LayerTestResult<float, 4> ResizeBilinearMinTest(
5349 armnn::IWorkloadFactory& workloadFactory,
5350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005351 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005352{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005353 const armnn::TensorInfo inputTensorInfo =
5354 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
5355
5356 const armnn::TensorInfo outputTensorInfo =
5357 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005358
James Conroy6b965822018-11-01 11:33:09 +00005359 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005360 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
5361 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00005362 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
5363
5364 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
5365 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
5366 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
5367 });
5368
5369 std::vector<float> outputData({
5370 1.0f, 2.6666f, 6.00f,
5371 78.5f, 179.3333f, 401.00f,
5372
5373 987.0f, 454.6670f, 203.33f,
5374 48.5f, 22.3333f, 10.00f
5375 });
5376
5377 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005378 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005379 {
5380 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005381 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005382 inputData = tmp;
5383
5384 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005385 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005386 outputData = tmp1;
5387 }
5388
5389 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005390
5391 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005392 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005393
5394 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5395 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5396
5397 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005398 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005399 armnn::WorkloadInfo info;
5400 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5401 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5402
5403 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5404
5405 inputHandle->Allocate();
5406 outputHandle->Allocate();
5407 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5408
Derek Lambertif30f7d32019-04-09 10:25:02 +01005409 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005410 workload->Execute();
5411
5412 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5413 return result;
5414}
5415
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005416LayerTestResult<float, 4> ResizeBilinearMagTest(
5417 armnn::IWorkloadFactory& workloadFactory,
5418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005419 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005421 const armnn::TensorInfo inputTensorInfo =
5422 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
5423
5424 const armnn::TensorInfo outputTensorInfo =
5425 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005426
James Conroy6b965822018-11-01 11:33:09 +00005427 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005428 1.0f, 2.0f,
5429 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005430 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00005431
James Conroy6b965822018-11-01 11:33:09 +00005432 233.0f, 144.0f,
5433 21.0f, 13.0f,
5434 2.0f, 1.0f
5435 });
5436
5437 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01005438 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
5439 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005440 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
5441
5442 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
5443 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
5444 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
5445 });
5446
5447 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005448 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005449 {
5450 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005451 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005452 inputData = tmp;
5453
5454 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005455 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005456 outputData = tmp1;
5457 }
5458
5459 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5460
5461 LayerTestResult<float, 4> result(outputTensorInfo);
5462 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005463
5464 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5465 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5466
5467 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005468 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005469 armnn::WorkloadInfo info;
5470 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5471 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5472
5473 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5474
5475 inputHandle->Allocate();
5476 outputHandle->Allocate();
5477 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5478
Derek Lambertif30f7d32019-04-09 10:25:02 +01005479 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005480 workload->Execute();
5481
5482 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5483 return result;
5484}
5485
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005486LayerTestResult<float, 2> FakeQuantizationTest(
5487 armnn::IWorkloadFactory& workloadFactory,
5488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005489{
5490 constexpr unsigned int width = 2;
5491 constexpr unsigned int height = 3;
5492
5493 const armnn::TensorInfo tensorInfo({height, width },
5494 armnn::DataType::Float32);
5495 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5496 -10.0f, -5.0f,
5497 0.0f, 5.0f,
5498 10.0f, 10.0f
5499 }));
5500
5501 LayerTestResult<float, 2> ret(tensorInfo);
5502
5503 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5504
5505 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5506
5507 armnn::FakeQuantizationQueueDescriptor data;
5508 armnn::WorkloadInfo info;
5509
5510 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5511 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5512 float min = -10.f;
5513 float max = 10.f;
5514
5515 data.m_Parameters.m_Min = min;
5516 data.m_Parameters.m_Max = max;
5517
5518 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5519 armnn::FakeQuantizationQueueDescriptor refData = data;
5520 armnn::WorkloadInfo refInfo = info;
5521 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5522
5523 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5524
5525 inputHandle->Allocate();
5526 outputHandle->Allocate();
5527
5528 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5529
Derek Lambertif30f7d32019-04-09 10:25:02 +01005530 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005531 workload->Execute();
5532
5533 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5534
5535 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5536 0.0f, 63.0f,
5537 128.0f, 191.0f,
5538 255.0f, 255.0f
5539 }));
5540 return ret;
5541}
5542
namespace
{

// Shared implementation for the L2Normalization layer tests.
//
// Creates and executes an L2Normalization workload over 'inputValues' and
// records 'expectedOutputValues' as the reference result. Both value vectors
// are supplied in NCHW order; when 'layout' is NHWC the data (and the
// expected output) are permuted into NHWC before use.
//
// @param workloadFactory        - factory used to create the workload and tensor handles.
// @param memoryManager          - backend memory manager handed to ExecuteWorkload.
// @param inputOutputTensorShape - shape shared by the input and output tensors.
// @param inputValues            - input data, laid out NCHW.
// @param expectedOutputValues   - expected output data, laid out NCHW.
// @param layout                 - data layout the workload is configured with.
LayerTestResult<float, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));

    LayerTestResult<float, 4> result(outputTensorInfo);
    // Permute the expected output the same way as the input.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        // NOTE(review): inputTensorInfo is used for the output permutation and
        // for the expected-output tensor below; this is only safe because input
        // and output share inputOutputTensorShape.
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            expectedOutputData.data(), tmp.data(), sizeof(float));
        expectedOutputData = tmp;
    }
    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / L2-norm of the given values, i.e. 1 / sqrt(sum of squares).
// Used by the L2Normalization tests to compute expected outputs by hand.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
5613
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005614template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005615LayerTestResult<T, 2> Pad2dTestCommon(
5616 armnn::IWorkloadFactory& workloadFactory,
5617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5618 float qScale,
5619 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005620{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005621 const armnn::TensorShape inputShape{ 3, 3 };
5622 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005623
Derek Lambertif30f7d32019-04-09 10:25:02 +01005624 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5625 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005626
Derek Lambertif30f7d32019-04-09 10:25:02 +01005627 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005628 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005629 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005630 // Height (3) x Width (3)
5631 4, 8, 6,
5632 7, 4, 4,
5633 3, 2, 4
5634 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005635
Derek Lambertif30f7d32019-04-09 10:25:02 +01005636 std::vector<T> expectedOutputValues(
5637 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005638 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005639 0, 0, 0, 0, 0, 0, 0,
5640 0, 0, 0, 0, 0, 0, 0,
5641 0, 0, 4, 8, 6, 0, 0,
5642 0, 0, 7, 4, 4, 0, 0,
5643 0, 0, 3, 2, 4, 0, 0,
5644 0, 0, 0, 0, 0, 0, 0,
5645 0, 0, 0, 0, 0, 0, 0
5646 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005647
Derek Lambertif30f7d32019-04-09 10:25:02 +01005648 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005649
Derek Lambertif30f7d32019-04-09 10:25:02 +01005650 LayerTestResult<T, 2> result(outputTensorInfo);
5651 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005652
Derek Lambertif30f7d32019-04-09 10:25:02 +01005653 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5654 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005655
Derek Lambertif30f7d32019-04-09 10:25:02 +01005656 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005657
Derek Lambertif30f7d32019-04-09 10:25:02 +01005658 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5659 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5660 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005661
Derek Lambertif30f7d32019-04-09 10:25:02 +01005662 descriptor.m_Parameters.m_PadList = PadList;
5663 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005664
Derek Lambertif30f7d32019-04-09 10:25:02 +01005665 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5666 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005667
Derek Lambertif30f7d32019-04-09 10:25:02 +01005668 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005669
Derek Lambertif30f7d32019-04-09 10:25:02 +01005670 inputHandle->Allocate();
5671 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005672
Derek Lambertif30f7d32019-04-09 10:25:02 +01005673 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005674
Derek Lambertif30f7d32019-04-09 10:25:02 +01005675 workload->PostAllocationConfigure();
5676 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005677
Derek Lambertif30f7d32019-04-09 10:25:02 +01005678 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005679
Derek Lambertif30f7d32019-04-09 10:25:02 +01005680 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005681}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005682
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005683template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005684LayerTestResult<T, 3> Pad3dTestCommon(
5685 armnn::IWorkloadFactory& workloadFactory,
5686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5687 float qScale,
5688 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005689{
5690 const armnn::TensorShape inputShape{ 2, 2, 2 };
5691 const armnn::TensorShape outputShape{ 3, 5, 6 };
5692
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005693 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5694 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005695
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005696 std::vector<T> inputValues(
5697 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005698 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005699 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005700 0, 4,
5701 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005702
5703 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005704 6, 1,
5705 5, 2
5706 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005707
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005708 std::vector<T> expectedOutputValues(
5709 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005710 {
5711
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005712 0, 0, 0, 0, 0, 0,
5713 0, 0, 0, 0, 0, 0,
5714 0, 0, 0, 4, 0, 0,
5715 0, 0, 2, 5, 0, 0,
5716 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005717
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005718 0, 0, 0, 0, 0, 0,
5719 0, 0, 0, 0, 0, 0,
5720 0, 0, 6, 1, 0, 0,
5721 0, 0, 5, 2, 0, 0,
5722 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005723
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005724 0, 0, 0, 0, 0, 0,
5725 0, 0, 0, 0, 0, 0,
5726 0, 0, 0, 0, 0, 0,
5727 0, 0, 0, 0, 0, 0,
5728 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005729
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005730 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005731
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005732 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005733
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005734 LayerTestResult<T, 3> result(outputTensorInfo);
5735 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005736
5737 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5738 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5739
5740 armnn::PadQueueDescriptor descriptor;
5741
5742 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5743 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5744 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5745 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5746
5747 descriptor.m_Parameters.m_PadList = PadList;
5748 armnn::WorkloadInfo info;
5749
5750 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5751 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5752
5753 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5754
5755 inputHandle->Allocate();
5756 outputHandle->Allocate();
5757
5758 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5759
Derek Lambertif30f7d32019-04-09 10:25:02 +01005760 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005761 workload->Execute();
5762
5763 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5764
5765 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005766}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005767
// Pads a 2x2x3x2 tensor to 4x5x7x4 with zeros, padding every dimension:
// (1,1) on batch, (2,1) on channel, (3,1) on height and (1,1) on width.
// Input batch b, channel c therefore lands in output batch b+1, channel c+2,
// offset by (3,1) within each height x width plane; everything else is zero.
//
// @param workloadFactory - factory used to create the workload and tensor handles.
// @param memoryManager   - backend memory manager (unused by this test path).
// @param qScale/qOffset  - quantisation parameters applied to all values.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);

    std::vector<T> inputValues(
        QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        0, 1,
        2, 3,
        4, 5,

        // Batch 0, Channel 1, Height (3) x Width (2)
        6, 7,
        8, 9,
        10, 11,

        // Batch 1, Channel 0, Height (3) x Width (2)
        12, 13,
        14, 15,
        16, 17,

        // Batch 1, Channel 1, Height (3) x Width (2)
        18, 19,
        20, 21,
        22, 23
    }));

    // Expected output: 4 batches x 5 channels of 7x4 planes. Only the four
    // planes holding the original data are non-zero; all others are padding.
    std::vector<T> expectedOutputValues(
        QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0 (leading batch padding), Channels 0-4: all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channels 0-1: leading channel padding, all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 2: input Batch 0, Channel 0 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
        0, 2, 3, 0,
        0, 4, 5, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 3: input Batch 0, Channel 1 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 6, 7, 0,
        0, 8, 9, 0,
        0, 10, 11, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 4: trailing channel padding, all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, Channels 0-1: leading channel padding, all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 2: input Batch 1, Channel 0 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 12, 13, 0,
        0, 14, 15, 0,
        0, 16, 17, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 3: input Batch 1, Channel 1 data.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 18, 19, 0,
        0, 20, 21, 0,
        0, 22, 23, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 4: trailing channel padding, all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3 (trailing batch padding), Channels 0-4: all zeros.
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0
    }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // One (before, after) pair per dimension: batch, channel, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6005
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006006LayerTestResult<uint8_t, 2> PadUint82dTest(
6007 armnn::IWorkloadFactory& workloadFactory,
6008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006010 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006011}
6012
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006013LayerTestResult<uint8_t, 3> PadUint83dTest(
6014 armnn::IWorkloadFactory& workloadFactory,
6015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006016{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006017 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006018}
6019
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006020LayerTestResult<uint8_t, 4> PadUint84dTest(
6021 armnn::IWorkloadFactory& workloadFactory,
6022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006023{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006024 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006025}
6026
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006027LayerTestResult<float, 2> PadFloat322dTest(
6028 armnn::IWorkloadFactory& workloadFactory,
6029 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006030{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006031 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006032}
6033
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006034LayerTestResult<float, 3> PadFloat323dTest(
6035 armnn::IWorkloadFactory& workloadFactory,
6036 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006037{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006038 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006039}
6040
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006041LayerTestResult<float, 4> PadFloat324dTest(
6042 armnn::IWorkloadFactory& workloadFactory,
6043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006045 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006046}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006047
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006048LayerTestResult<float, 4> L2Normalization1dTest(
6049 armnn::IWorkloadFactory& workloadFactory,
6050 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006051 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006052{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006053 // Width: 1
6054 // Height: 1
6055 // Channels: 10
6056 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006057 unsigned int numberOfBatches = 1;
6058 unsigned int numberOfChannels = 10;
6059 unsigned int height = 1;
6060 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006061
jimfly013aab7c32018-11-12 13:32:08 +00006062
Nina Drozdd41b2592018-11-19 13:03:36 +00006063 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006064 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006065 std::vector<float> inputValues
6066 {
6067 // Batch 0, Channel 0, Height (1) x Width (1)
6068 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006069
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006070 // Batch 0, Channel 1, Height (1) x Width (1)
6071 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006072
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006073 // Batch 0, Channel 2, Height (1) x Width (1)
6074 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006075
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006076 // Batch 0, Channel 3, Height (1) x Width (1)
6077 4.0f,
6078
6079 // Batch 0, Channel 4, Height (1) x Width (1)
6080 5.0f,
6081
6082 // Batch 0, Channel 5, Height (1) x Width (1)
6083 6.0f,
6084
6085 // Batch 0, Channel 6, Height (1) x Width (1)
6086 7.0f,
6087
6088 // Batch 0, Channel 7, Height (1) x Width (1)
6089 8.0f,
6090
6091 // Batch 0, Channel 8, Height (1) x Width (1)
6092 9.0f,
6093
6094 // Batch 0, Channel 9, Height (1) x Width (1)
6095 10.0f
6096 };
telsoa014fcda012018-03-09 14:13:49 +00006097 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006098 std::vector<float> expectedOutputValues
6099 {
6100 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00006101 1.0f * approxInvL2Norm,
6102 2.0f * approxInvL2Norm,
6103 3.0f * approxInvL2Norm,
6104 4.0f * approxInvL2Norm,
6105 5.0f * approxInvL2Norm,
6106 6.0f * approxInvL2Norm,
6107 7.0f * approxInvL2Norm,
6108 8.0f * approxInvL2Norm,
6109 9.0f * approxInvL2Norm,
6110 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006111 };
telsoa014fcda012018-03-09 14:13:49 +00006112
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006113
6114 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006115 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006116}
6117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006118LayerTestResult<float, 4> L2Normalization2dTest(
6119 armnn::IWorkloadFactory& workloadFactory,
6120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006121 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006122{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006123 // Width: 5
6124 // Height: 1
6125 // Channels: 2
6126 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006127 unsigned int numberOfBatches = 1;
6128 unsigned int numberOfChannels = 2;
6129 unsigned int height = 1;
6130 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006131
Nina Drozdd41b2592018-11-19 13:03:36 +00006132 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006133 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006134 std::vector<float> inputValues
6135 {
6136 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006137 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006138
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006139 // Batch 0, Channel 1, Height (1) x Width (5)
6140 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6141 };
6142 std::vector<float> expectedOutputValues
6143 {
6144 // Batch 0, Channel 0, Height (1) x Width (5)
6145 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6146 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6147 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6148 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006149 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
6150
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006151 // Batch 0, Channel 1, Height (1) x Width (5)
6152 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6153 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6154 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6155 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006156 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006157 };
telsoa014fcda012018-03-09 14:13:49 +00006158
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006159 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006160 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006161}
telsoa014fcda012018-03-09 14:13:49 +00006162
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006163LayerTestResult<float, 4> L2Normalization3dTest(
6164 armnn::IWorkloadFactory& workloadFactory,
6165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006166 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006167{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006168 // Width: 3
6169 // Height: 4
6170 // Channels: 2
6171 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006172 unsigned int numberOfBatches = 1;
6173 unsigned int numberOfChannels = 2;
6174 unsigned int height = 4;
6175 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006176
Nina Drozdd41b2592018-11-19 13:03:36 +00006177 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006178 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006179 std::vector<float> inputValues
6180 {
6181 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006182 119.0f, 21.0f, 150.0f,
6183 149.0f, 32.0f, 179.0f,
6184 15.0f, 227.0f, 141.0f,
6185 147.0f, 199.0f, 220.0f,
6186
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006187 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006188 110.0f, 140.0f, 73.0f,
6189 211.0f, 212.0f, 89.0f,
6190 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006191 162.0f, 12.0f, 161.0f
6192 };
6193 std::vector<float> expectedOutputValues
6194 {
6195 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006196 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6197 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6198 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6199 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6200 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6201 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6202 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6203 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6204 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6205 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6206 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6207 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6208
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006209 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006210 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6211 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6212 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6213 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6214 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6215 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6216 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6217 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6218 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6219 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6220 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006221 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6222 };
telsoa014fcda012018-03-09 14:13:49 +00006223
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006224 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006225 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006226}
telsoa014fcda012018-03-09 14:13:49 +00006227
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006228LayerTestResult<float, 4> L2Normalization4dTest(
6229 armnn::IWorkloadFactory& workloadFactory,
6230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006231 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006232{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006233 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006234 // Height: 4
6235 // Channels: 3
6236 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006237 unsigned int numberOfBatches = 2;
6238 unsigned int numberOfChannels = 3;
6239 unsigned int height = 4;
6240 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006241
Nina Drozdd41b2592018-11-19 13:03:36 +00006242 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006243 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006244 std::vector<float> inputValues
6245 {
6246 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006247 235.0f, 46.0f, 178.0f,
6248 100.0f, 123.0f, 19.0f,
6249 172.0f, 74.0f, 250.0f,
6250 6.0f, 195.0f, 80.0f,
6251
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006252 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006253 113.0f, 95.0f, 202.0f,
6254 77.0f, 114.0f, 71.0f,
6255 122.0f, 246.0f, 166.0f,
6256 82.0f, 28.0f, 37.0f,
6257
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006258 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006259 56.0f, 170.0f, 162.0f,
6260 194.0f, 89.0f, 254.0f,
6261 12.0f, 209.0f, 200.0f,
6262 1.0f, 64.0f, 54.0f,
6263
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006264 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006265 67.0f, 90.0f, 49.0f,
6266 7.0f, 163.0f, 18.0f,
6267 25.0f, 117.0f, 103.0f,
6268 247.0f, 59.0f, 189.0f,
6269
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006270 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006271 239.0f, 104.0f, 199.0f,
6272 17.0f, 124.0f, 153.0f,
6273 222.0f, 217.0f, 75.0f,
6274 32.0f, 126.0f, 21.0f,
6275
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006276 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006277 97.0f, 145.0f, 215.0f,
6278 115.0f, 116.0f, 238.0f,
6279 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006280 92.0f, 125.0f, 88.0f
6281 };
6282 std::vector<float> expectedOutputValues
6283 {
6284 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006285 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6286 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6287 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6288 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6289 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6290 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6291 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6292 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6293 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6294 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6295 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6296 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6297
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006298 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006299 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6300 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6301 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6302 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6303 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6304 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6305 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6306 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6307 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6308 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6309 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6310 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6311
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006312 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006313 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6314 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6315 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6316 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6317 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6318 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6319 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6320 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6321 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6322 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6323 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6324 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6325
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006326 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006327 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6328 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6329 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6330 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6331 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6332 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6333 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6334 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6335 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6336 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6337 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6338 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6339
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006340 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006341 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6342 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6343 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6344 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6345 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6346 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6347 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6348 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6349 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6350 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6351 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6352 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6353
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006354 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006355 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6356 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6357 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6358 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6359 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6360 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6361 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6362 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6363 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6364 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6365 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006366 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
6367 };
telsoa014fcda012018-03-09 14:13:49 +00006368
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006369 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006370 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006371}
6372
// Runs a Constant workload whose layer output is a fixed [2, 3, 4, 3] tensor
// and verifies the workload reproduces that tensor unchanged on its output.
// ArmnnType selects the data type; qScale/qOffset quantize the reference data
// when T is a quantized type.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other tests but is not referenced in this body — presumably intentional.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer copies its stored tensor, so output shape == input shape.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Reference data in NCHW order; QuantizedVector converts the float values
    // to T using qScale/qOffset (a pass-through for float types).
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
    QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // The constant's stored data is itself the expected output.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The Constant workload reads its data from a CPU tensor handle owned here.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // A Constant layer has no inputs; only the output is registered.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6466
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006467LayerTestResult<float, 4> ConstantTest(
6468 armnn::IWorkloadFactory& workloadFactory,
6469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006470{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006471 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006472}
6473
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006474LayerTestResult<uint8_t, 4> ConstantTestUint8(
6475 armnn::IWorkloadFactory& workloadFactory,
6476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006477{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006478 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006479}
6480
// Merges two QAsymm8 tensors along the channel axis where the inputs have
// DIFFERENT quantization parameters. The output shares input1's parameters,
// so input1's bytes pass through unchanged while input2 must be requantized
// from its own (scale, offset) into the output's quantization space.
// NOTE(review): memoryManager is unused here, matching the other merger tests.
LayerTestResult<uint8_t, 3> MergerUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is [3, 6, 3]: input1's two channels stacked on input2's one.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quatized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters than input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels are input1's raw bytes (no requantization needed);
    // the third channel is input2 requantized into the output's space.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    // Quantization info is applied after the reference tensors are built;
    // MakeTensor above only needed the shapes.
    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors, the inputs are created as views
    // into the output at their merge origins; otherwise standalone handles.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after all handles are allocated.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6623
// Merges two QAsymm8 tensors along the channel axis where inputs and output
// all share the same quantization parameters, so the merger is a pure byte
// copy: the expected output is simply the two inputs stacked channel-wise.
// NOTE(review): memoryManager is unused here, matching the other merger tests.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is [3, 6, 3]: input1's two channels stacked on input2's one.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's 36 values followed by input2's 18 values.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors, the inputs are created as views
    // into the output at their merge origins; otherwise standalone handles.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after all handles are allocated.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6759
telsoa014fcda012018-03-09 14:13:49 +00006760
surmeh01bceff2f2018-03-29 16:29:27 +01006761namespace
telsoa014fcda012018-03-09 14:13:49 +00006762{
Sadik Armagan2999a022019-04-09 14:20:12 +01006763template <typename T>
6764LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006765 armnn::IWorkloadFactory& workloadFactory,
6766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6767 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006768 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006769 float scale0,
6770 int32_t offset0,
6771 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006772 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006773 float scale1,
6774 int32_t offset1,
6775 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006776 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006777 float outScale,
6778 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01006779{
Sadik Armagan2999a022019-04-09 14:20:12 +01006780 auto dataType = (std::is_same<T, uint8_t>::value ?
6781 armnn::DataType::QuantisedAsymm8 :
6782 armnn::DataType::QuantisedSymm16);
6783
6784 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
6785 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
6786 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00006787
surmeh01bceff2f2018-03-29 16:29:27 +01006788 inputTensorInfo0.SetQuantizationScale(scale0);
6789 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00006790
surmeh01bceff2f2018-03-29 16:29:27 +01006791 inputTensorInfo1.SetQuantizationScale(scale1);
6792 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00006793
surmeh01bceff2f2018-03-29 16:29:27 +01006794 outputTensorInfo.SetQuantizationScale(outScale);
6795 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00006796
Sadik Armagan2999a022019-04-09 14:20:12 +01006797 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
6798 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00006799
Sadik Armagan2999a022019-04-09 14:20:12 +01006800 LayerTestResult<T, 4> result(outputTensorInfo);
6801 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
6802
6803 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
6804 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6805 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6806
6807 armnn::AdditionQueueDescriptor data;
6808 armnn::WorkloadInfo info;
6809 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6810 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6811 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6812
6813 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6814
6815 inputHandle0->Allocate();
6816 inputHandle1->Allocate();
6817 outputHandle->Allocate();
6818
6819 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
6820 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6821
Derek Lambertif30f7d32019-04-09 10:25:02 +01006822 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01006823 workload->Execute();
6824
6825 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6826
6827 return result;
6828}
6829} // anonymous namespace
6830
6831LayerTestResult<uint8_t, 4> AdditionUint8Test(
6832 armnn::IWorkloadFactory& workloadFactory,
6833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6834{
6835 const unsigned int shape0[] = { 1, 2, 2, 3 };
6836 const unsigned int shape1[] = { 1, 2, 2, 3 };
6837
6838 std::vector<uint8_t> input0(
6839 {
6840 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
6841 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
6842 });
6843
6844 std::vector<uint8_t> input1(
6845 {
6846 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
6847 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
6848 });
6849
6850 std::vector<uint8_t> output(
6851 {
6852 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
6853 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
6854 });
6855
6856 return AdditionQuantizeTestHelper(workloadFactory,
6857 memoryManager,
6858 shape0, input0, 7.0f, 3,
6859 shape1, input1, 7.0f, 3,
6860 shape0, output, 7.0f, 3);
6861}
6862
6863LayerTestResult<int16_t, 4> AdditionInt16Test(
6864 armnn::IWorkloadFactory& workloadFactory,
6865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6866{
6867 const unsigned int shape0[] = { 1, 2, 2, 3 };
6868 const unsigned int shape1[] = { 1, 2, 2, 3 };
6869
6870 std::vector<int16_t> input0(
6871 {
6872 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
6873 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
6874 });
6875
6876 std::vector<int16_t> input1(
6877 {
6878 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
6879 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
6880 });
6881
6882 std::vector<int16_t> output(
6883 {
6884 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
6885 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
6886 });
6887
6888 return AdditionQuantizeTestHelper(workloadFactory,
6889 memoryManager,
6890 shape0, input0, 7.0f, 0,
6891 shape1, input1, 7.0f, 0,
6892 shape0, output, 7.0f, 0);
6893}
6894
namespace
{
// Generic driver for quantized Multiplication workloads.
//
// Builds a Multiplication workload for the given backend, feeds it the two
// quantized input tensors, runs it, and returns the actual output alongside
// the caller-supplied expected output for later comparison.
//
// ArmnnType selects the quantized data type (e.g. QuantisedAsymm8 or
// QuantisedSymm16); T is the matching C++ element type resolved from it.
// Each input/output is described by a 4D shape, its quantized values, and
// its quantization scale and offset (zero point).
// Note: memoryManager is part of the common test signature but is not used
// by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Describe both inputs and the output, including quantization parameters.
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // 'output' is filled in after execution; 'outputExpected' holds the
    // caller-provided reference values.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocation must precede the host-to-backend copies below.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    // Read the backend's result back into 'output'.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006961LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
6962 armnn::IWorkloadFactory& workloadFactory,
6963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006964{
6965 unsigned int batchSize = 1;
6966 unsigned int channels = 2;
6967 unsigned int height = 2;
6968 unsigned int width = 3;
6969 const unsigned int shape[] = { batchSize, channels, height, width };
6970
telsoa01c577f2c2018-08-31 09:22:23 +01006971 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006972 std::vector<uint8_t> input0({
6973 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
6974 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
6975 });
6976
telsoa01c577f2c2018-08-31 09:22:23 +01006977 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006978 std::vector<uint8_t> input1({
6979 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
6980 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
6981 });
6982
telsoa01c577f2c2018-08-31 09:22:23 +01006983 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006984 std::vector<uint8_t> output(
6985 {
6986 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
6987 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
6988 });
6989
Sadik Armagan2999a022019-04-09 14:20:12 +01006990 // Scale/offset chosen to have output values out of range.
6991 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
6992 memoryManager,
6993 shape,
6994 input0,
6995 4.0f,
6996 1,
6997 shape,
6998 input1,
6999 3.0f,
7000 -2,
7001 shape,
7002 output,
7003 1366.255f,
7004 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007005}
7006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007007LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7008 armnn::IWorkloadFactory& workloadFactory,
7009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007010{
7011 const unsigned int shape0[] = { 1, 2, 2, 3 };
7012 const unsigned int shape1[] = { 1, 1, 1, 1 };
7013
7014 std::vector<uint8_t> input0({
7015 1, 2, 3, 4, 5, 6,
7016 7, 8, 9, 10, 11, 12
7017 });
7018
7019 std::vector<uint8_t> input1({2});
7020
7021 std::vector<uint8_t> output({
7022 2, 4, 6, 8, 10, 12,
7023 14, 16, 18, 20, 22, 24
7024 });
7025
Sadik Armagan2999a022019-04-09 14:20:12 +01007026 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7027 memoryManager,
7028 shape0,
7029 input0,
7030 1.0f,
7031 0,
7032 shape1,
7033 input1,
7034 1.0f,
7035 0,
7036 shape0,
7037 output,
7038 1.0f,
7039 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007040}
7041
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007042LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7043 armnn::IWorkloadFactory& workloadFactory,
7044 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007045{
7046 const unsigned int shape0[] = { 1, 2, 2, 3 };
7047 const unsigned int shape1[] = { 1, 1, 1, 3 };
7048
7049 std::vector<uint8_t> input0({
7050 1, 2, 3, 4, 5, 6,
7051 7, 8, 9, 10, 11, 12
7052 });
7053
7054 std::vector<uint8_t> input1({1, 2, 3});
7055
7056 std::vector<uint8_t> output({
7057 1, 4, 9, 4, 10, 18,
7058 7, 16, 27, 10, 22, 36
7059 });
7060
Sadik Armagan2999a022019-04-09 14:20:12 +01007061 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7062 memoryManager,
7063 shape0,
7064 input0,
7065 1.0f,
7066 0,
7067 shape1,
7068 input1,
7069 1.0f,
7070 0,
7071 shape0,
7072 output,
7073 1.0f,
7074 0);
7075}
7076
7077LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7078 armnn::IWorkloadFactory& workloadFactory,
7079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7080{
7081 const unsigned int shape[] = { 1, 2, 2, 3 };
7082
7083 std::vector<int16_t> input0(
7084 {
7085 6, 7, 8, 9, 10, 11,
7086 12, 13, 14, 15, 16, 17
7087 });
7088
7089 std::vector<int16_t> input1(
7090 {
7091 1, 2, 3, 4, 5, 6,
7092 7, 8, 9, 10, 11, 12
7093 });
7094
7095 std::vector<int16_t> output(
7096 {
7097 6, 14, 24, 36, 50, 66,
7098 84, 104, 126, 150, 176, 204
7099 });
7100
7101 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7102 memoryManager,
7103 shape,
7104 input0,
7105 1.0f,
7106 0,
7107 shape,
7108 input1,
7109 1.0f,
7110 0,
7111 shape,
7112 output,
7113 1.0f,
7114 0);
7115}
7116
7117LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7118 armnn::IWorkloadFactory& workloadFactory,
7119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7120{
7121 const unsigned int shape0[] = { 1, 2, 2, 3 };
7122 const unsigned int shape1[] = { 1, 1, 1, 1 };
7123
7124 std::vector<int16_t> input0(
7125 {
7126 1, 2, 3, 4, 5, 6,
7127 7, 8, 9, 10, 11, 12
7128 });
7129
7130 std::vector<int16_t> input1({2});
7131
7132 std::vector<int16_t> output(
7133 {
7134 2, 4, 6, 8, 10, 12,
7135 14, 16, 18, 20, 22, 24
7136 });
7137
7138 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7139 memoryManager,
7140 shape0,
7141 input0,
7142 1.0f,
7143 0,
7144 shape1,
7145 input1,
7146 1.0f,
7147 0,
7148 shape0,
7149 output,
7150 1.0f,
7151 0);
7152}
7153
7154LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7155 armnn::IWorkloadFactory& workloadFactory,
7156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7157{
7158 const unsigned int shape0[] = { 1, 2, 2, 3 };
7159 const unsigned int shape1[] = { 1, 1, 1, 3 };
7160
7161 std::vector<int16_t> input0(
7162 {
7163 1, 2, 3, 4, 5, 6,
7164 7, 8, 9, 10, 11, 12
7165 });
7166
7167 std::vector<int16_t> input1({1, 2, 3});
7168
7169 std::vector<int16_t> output(
7170 {
7171 1, 4, 9, 4, 10, 18,
7172 7, 16, 27, 10, 22, 36
7173 });
7174
7175 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7176 memoryManager,
7177 shape0,
7178 input0,
7179 1.0f,
7180 0,
7181 shape1,
7182 input1,
7183 1.0f,
7184 0,
7185 shape0,
7186 output,
7187 1.0f,
7188 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007189}
telsoa014fcda012018-03-09 14:13:49 +00007190
namespace
{
// Generic driver for Subtraction workloads (float or quantized).
//
// Builds a Subtraction workload for the given backend, feeds it the two
// input tensors (input0 - input1), runs it, and returns the actual output
// alongside the caller-supplied expected output for later comparison.
//
// ArmnnType selects the data type (Float32, QuantisedAsymm8,
// QuantisedSymm16, ...); T is the matching C++ element type resolved from
// it. Each input/output is described by a 4D shape, its (possibly
// quantized) values, and its quantization scale and offset — the latter are
// ignored by float backends.
// Note: memoryManager is part of the common test signature but is not used
// by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Describe both inputs and the output, including quantization parameters.
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // 'output' is filled in after execution; 'outputExpected' holds the
    // caller-provided reference values.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocation must precede the host-to-backend copies below.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    // Read the backend's result back into 'output'.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7256
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007257LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7258 armnn::IWorkloadFactory& workloadFactory,
7259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007260{
7261 const unsigned int shape0[] = { 1, 1, 2, 2 };
7262 const unsigned int shape1[] = { 1, 1, 2, 2 };
7263
7264 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7265 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7266 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7267
Sadik Armagan2999a022019-04-09 14:20:12 +01007268 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7269 memoryManager,
7270 shape0, input0, 0.5f, 2,
7271 shape1, input1, 1.0f, 0,
7272 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007273}
7274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007275LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7276 armnn::IWorkloadFactory& workloadFactory,
7277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007278{
7279 const unsigned int shape0[] = { 1, 1, 2, 2 };
7280 const unsigned int shape1[] = { 1, 1, 1, 1 };
7281
7282 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7283 std::vector<uint8_t> input1({ 2 });
7284 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7285
Sadik Armagan2999a022019-04-09 14:20:12 +01007286 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7287 memoryManager,
7288 shape0, input0, 0.5f, 2,
7289 shape1, input1, 1.0f, 0,
7290 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007291}
7292
LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    // NOTE(review): the expected output { 8, 11, 12, 15 } corresponds to
    // broadcasting { 2, 1 } along the last (width) axis — i.e. a shape of
    // { 1, 1, 1, 2 }, as used by the float SubtractionBroadcastTest — whereas
    // shape1 declares { 1, 1, 2, 1 } (height axis), which would yield
    // { 8, 10, 13, 15 }. Confirm which of shape1/output is intended.
    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 1.0f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
7310
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007311LayerTestResult<float, 4> SubtractionTest(
7312 armnn::IWorkloadFactory& workloadFactory,
7313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007314{
7315 const unsigned int shape0[] = { 1, 1, 2, 2 };
7316 const unsigned int shape1[] = { 1, 1, 2, 2 };
7317
7318 std::vector<float> input0({ 1, 2, 3, 4 });
7319 std::vector<float> input1({ 1, -1, 0, 2 });
7320 std::vector<float> output({ 0, 3, 3, 2 });
7321
Sadik Armagan2999a022019-04-09 14:20:12 +01007322 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7323 memoryManager,
7324 shape0, input0, 1.0f, 0,
7325 shape1, input1, 1.0f, 0,
7326 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007327}
7328
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007329LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7330 armnn::IWorkloadFactory& workloadFactory,
7331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007332{
7333 const unsigned int shape0[] = { 1, 1, 2, 2 };
7334 const unsigned int shape1[] = { 1, 1, 1, 1 };
7335
7336 std::vector<float> input0({ 1, 2, 3, 4 });
7337 std::vector<float> input1({ 10 });
7338 std::vector<float> output({ -9, -8, -7, -6 });
7339
Sadik Armagan2999a022019-04-09 14:20:12 +01007340 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7341 memoryManager,
7342 shape0, input0, 1.0f, 0,
7343 shape1, input1, 1.0f, 0,
7344 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007345}
7346
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007347LayerTestResult<float, 4> SubtractionBroadcastTest(
7348 armnn::IWorkloadFactory& workloadFactory,
7349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007350{
7351 const unsigned int shape0[] = { 1, 1, 2, 2 };
7352 const unsigned int shape1[] = { 1, 1, 1, 2 };
7353
7354 std::vector<float> input0({ 1, 2, 3, 4 });
7355 std::vector<float> input1({ 10, -5 });
7356 std::vector<float> output({ -9, 7, -7, 9 });
7357
Sadik Armagan2999a022019-04-09 14:20:12 +01007358 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7359 memoryManager,
7360 shape0, input0, 1.0f, 0,
7361 shape1, input1, 1.0f, 0,
7362 shape0, output, 1.0f, 0);
7363}
7364
LayerTestResult<int16_t, 4> SubtractionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    // NOTE(review): with symmetric quantization (zero point 0), input0
    // dequantizes at scale 0.5 to { 5, 6, 7, 8 }, so input0 - input1 should
    // be { 4, 4, 6, 6 } at output scale 1 — not { 3, 3, 5, 5 }. The expected
    // values below match SubtractionUint8Test, where input0 has zero point 2.
    // Confirm against the reference backend before relying on this test.
    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 1, 2, 1, 2 });
    std::vector<int16_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 0.5f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
7382
7383LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7384 armnn::IWorkloadFactory& workloadFactory,
7385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7386{
7387 const unsigned int shape0[] = { 1, 1, 2, 2 };
7388 const unsigned int shape1[] = { 1, 1, 1, 1 };
7389
7390 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7391 std::vector<int16_t> input1({ 2 });
7392 std::vector<int16_t> output({ 3, 4, 5, 6 });
7393
7394 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7395 memoryManager,
7396 shape0, input0, 0.5f, 0,
7397 shape1, input1, 1.0f, 0,
7398 shape0, output, 1.0f, 0);
7399}
7400
LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    // NOTE(review): as in SubtractionBroadcastUint8Test, the expected output
    // { 8, 11, 12, 15 } corresponds to broadcasting { 2, 1 } along the last
    // (width) axis — shape { 1, 1, 1, 2 } — whereas shape1 declares
    // { 1, 1, 2, 1 } (height axis), which would yield { 8, 10, 13, 15 }.
    // Confirm which of shape1/output is intended.
    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 2, 1 });
    std::vector<int16_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 1.0f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
7418
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007419LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
7420 armnn::IWorkloadFactory& workloadFactory,
7421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007422{
7423 constexpr unsigned int inputWidth = 4;
7424 constexpr unsigned int inputHeight = 4;
7425 constexpr unsigned int inputChannels = 1;
7426 constexpr unsigned int inputBatchSize = 1;
7427
7428 constexpr unsigned int outputWidth = inputWidth;
7429 constexpr unsigned int outputHeight = inputHeight;
7430 constexpr unsigned int outputChannels = inputChannels;
7431 constexpr unsigned int outputBatchSize = inputBatchSize;
7432
7433 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7434 armnn::DataType::QuantisedAsymm8);
7435 inputTensorInfo.SetQuantizationScale(1.5f);
7436 inputTensorInfo.SetQuantizationOffset(-3);
7437
7438 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7439 armnn::DataType::QuantisedAsymm8);
7440 outputTensorInfo.SetQuantizationScale(1.5f);
7441 outputTensorInfo.SetQuantizationOffset(-3);
7442
7443 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7444 1, 2, 3, 4,
7445 2, 3, 4, 5,
7446 3, 4, 5, 6,
7447 4, 5, 6, 7
7448 }));
7449
7450 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7451 result.outputExpected = input;
7452
7453 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7454 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7455
7456 armnn::ResizeBilinearQueueDescriptor descriptor;
7457 armnn::WorkloadInfo info;
7458 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7459 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7460
7461 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7462
7463 inputHandle->Allocate();
7464 outputHandle->Allocate();
7465 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7466
Derek Lambertif30f7d32019-04-09 10:25:02 +01007467 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007468 workload->Execute();
7469
7470 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7471 return result;
7472}
7473
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007474LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
7475 armnn::IWorkloadFactory& workloadFactory,
7476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007477{
7478 constexpr unsigned int inputWidth = 2;
7479 constexpr unsigned int inputHeight = 2;
7480 constexpr unsigned int inputChannels = 1;
7481 constexpr unsigned int inputBatchSize = 1;
7482
7483 constexpr unsigned int outputWidth = inputWidth / 2;
7484 constexpr unsigned int outputHeight = inputHeight / 2;
7485 constexpr unsigned int outputChannels = inputChannels;
7486 constexpr unsigned int outputBatchSize = inputBatchSize;
7487
7488 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7489 armnn::DataType::QuantisedAsymm8);
7490 inputTensorInfo.SetQuantizationScale(0.1567f);
7491 inputTensorInfo.SetQuantizationOffset(1);
7492
7493 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7494 armnn::DataType::QuantisedAsymm8);
7495 outputTensorInfo.SetQuantizationScale(0.1567f);
7496 outputTensorInfo.SetQuantizationOffset(1);
7497
7498 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7499 1, 255,
7500 200, 250
7501 }));
7502
7503 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
7504 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01007505 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00007506 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
7507 // the centre).
7508 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7509 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7510 1
7511 }));
7512
7513 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7514 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7515
7516 armnn::ResizeBilinearQueueDescriptor descriptor;
7517 armnn::WorkloadInfo info;
7518 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7519 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7520
7521 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7522
7523 inputHandle->Allocate();
7524 outputHandle->Allocate();
7525 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7526
Derek Lambertif30f7d32019-04-09 10:25:02 +01007527 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007528 workload->Execute();
7529
7530 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7531 return result;
7532}
7533
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007534LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
7535 armnn::IWorkloadFactory& workloadFactory,
7536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007537{
7538 constexpr unsigned int inputWidth = 4;
7539 constexpr unsigned int inputHeight = 4;
7540 constexpr unsigned int inputChannels = 1;
7541 constexpr unsigned int inputBatchSize = 1;
7542
7543 constexpr unsigned int outputWidth = inputWidth / 2;
7544 constexpr unsigned int outputHeight = inputHeight / 2;
7545 constexpr unsigned int outputChannels = inputChannels;
7546 constexpr unsigned int outputBatchSize = inputBatchSize;
7547
7548 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7549 armnn::DataType::QuantisedAsymm8);
7550 inputTensorInfo.SetQuantizationScale(3.141592f);
7551 inputTensorInfo.SetQuantizationOffset(3);
7552
7553 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7554 armnn::DataType::QuantisedAsymm8);
7555 outputTensorInfo.SetQuantizationScale(3.141592f);
7556 outputTensorInfo.SetQuantizationOffset(3);
7557
7558 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7559 1, 2, 3, 4,
7560 2, 3, 4, 5,
7561 3, 4, 5, 6,
7562 4, 5, 6, 7
7563 }));
7564
7565 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7566 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7567 1, 3,
7568 3, 5
7569 }));
7570
7571 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7572 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7573
7574 armnn::ResizeBilinearQueueDescriptor descriptor;
7575 armnn::WorkloadInfo info;
7576 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7577 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7578
7579 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7580
7581 inputHandle->Allocate();
7582 outputHandle->Allocate();
7583 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7584
Derek Lambertif30f7d32019-04-09 10:25:02 +01007585 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007586 workload->Execute();
7587
7588 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7589 return result;
7590}
7591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007592LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
7593 armnn::IWorkloadFactory& workloadFactory,
7594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007595{
7596 constexpr unsigned int inputWidth = 3;
7597 constexpr unsigned int inputHeight = 2;
7598 constexpr unsigned int inputChannels = 1;
7599 constexpr unsigned int inputBatchSize = 1;
7600
7601 constexpr unsigned int outputWidth = 2;
7602 constexpr unsigned int outputHeight = 1;
7603 constexpr unsigned int outputChannels = inputChannels;
7604 constexpr unsigned int outputBatchSize = inputBatchSize;
7605
7606 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7607 armnn::DataType::QuantisedAsymm8);
7608 inputTensorInfo.SetQuantizationScale(1.5f);
7609 inputTensorInfo.SetQuantizationOffset(-1);
7610
7611 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7612 armnn::DataType::QuantisedAsymm8);
7613 outputTensorInfo.SetQuantizationScale(1.5f);
7614 outputTensorInfo.SetQuantizationOffset(-1);
7615
7616 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7617 1, 2, 3, // 3.0, 4.5, 6.0
7618 5, 8, 13 // 9.0, 13.5, 21.0
7619 }));
7620
7621 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7622 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7623 1, 3 // 3.0, 5.25
7624 }));
7625
7626 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7627 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7628
7629 armnn::ResizeBilinearQueueDescriptor descriptor;
7630 armnn::WorkloadInfo info;
7631 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7632 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7633
7634 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7635
7636 inputHandle->Allocate();
7637 outputHandle->Allocate();
7638
7639 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7640
Derek Lambertif30f7d32019-04-09 10:25:02 +01007641 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007642 workload->Execute();
7643
7644 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7645 return result;
7646}
7647
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007648LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
7649 armnn::IWorkloadFactory& workloadFactory,
7650 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007651{
7652 constexpr unsigned int inputWidth = 2;
7653 constexpr unsigned int inputHeight = 3;
7654 constexpr unsigned int inputChannels = 1;
7655 constexpr unsigned int inputBatchSize = 1;
7656
7657 constexpr unsigned int outputWidth = 5;
7658 constexpr unsigned int outputHeight = 3;
7659 constexpr unsigned int outputChannels = inputChannels;
7660 constexpr unsigned int outputBatchSize = inputBatchSize;
7661
7662 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7663 armnn::DataType::QuantisedAsymm8);
7664 inputTensorInfo.SetQuantizationScale(0.010765f);
7665 inputTensorInfo.SetQuantizationOffset(7);
7666
7667 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7668 armnn::DataType::QuantisedAsymm8);
7669 outputTensorInfo.SetQuantizationScale(0.010132f);
7670 outputTensorInfo.SetQuantizationOffset(-18);
7671
7672 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7673 24, 228, // 0.183005, 2.379065,
7674 105, 128, // 1.05497, 1.302565
7675 230, 71 // 2.400595, 0.68896
7676 }));
7677
7678 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7679 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7680 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
7681 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
7682 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
7683 }));
7684
7685 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7686 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7687
7688 armnn::ResizeBilinearQueueDescriptor descriptor;
7689 armnn::WorkloadInfo info;
7690 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7691 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7692
7693 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7694
7695 inputHandle->Allocate();
7696 outputHandle->Allocate();
7697 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7698
Derek Lambertif30f7d32019-04-09 10:25:02 +01007699 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007700 workload->Execute();
7701
7702 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7703 return result;
7704}
7705
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007706LayerTestResult<float, 2> Rsqrt2dTestCommon(
7707 armnn::IWorkloadFactory& workloadFactory,
7708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7709 const armnn::TensorInfo inputTensorInfo,
7710 const armnn::TensorInfo outputTensorInfo,
7711 std::vector<float> inputValues,
7712 std::vector<float> expectedOutputValues)
7713{
7714 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
7715
7716 LayerTestResult<float, 2> result(outputTensorInfo);
7717 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
7718
7719 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7720 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7721
7722 armnn::RsqrtQueueDescriptor descriptor;
7723
7724 armnn::WorkloadInfo info;
7725
7726 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7727 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7728
7729 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
7730
7731 inputHandle->Allocate();
7732 outputHandle->Allocate();
7733
7734 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
7735
Derek Lambertif30f7d32019-04-09 10:25:02 +01007736 workload->PostAllocationConfigure();
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007737 workload->Execute();
7738
7739 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
7740
7741 return result;
7742}
7743LayerTestResult<float, 2> Rsqrt2dTest(
7744 armnn::IWorkloadFactory& workloadFactory,
7745 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7746{
7747 const armnn::TensorShape inputShape{ 2, 2 };
7748 const armnn::TensorShape outputShape{ 2, 2 };
7749
7750 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7751 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7752
7753 std::vector<float> inputValues
7754 {
7755 1.f, 4.f,
7756 16.f, 25.f
7757 };
7758
7759 std::vector<float> expectedOutputValues
7760 {
7761 1.f, 0.5f,
7762 0.25f, 0.2f
7763 };
7764
7765 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7766 inputTensorInfo, outputTensorInfo,
7767 inputValues, expectedOutputValues);
7768}
7769
7770LayerTestResult<float, 3> Rsqrt3dTest(
7771 armnn::IWorkloadFactory& workloadFactory,
7772 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7773{
7774 const armnn::TensorShape inputShape{ 3, 1, 2 };
7775 const armnn::TensorShape outputShape{ 3, 1, 2 };
7776
7777 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7778 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7779
7780 std::vector<float> inputValues
7781 {
7782 1.f, 4.f, 16.f,
7783 25.f, 64.f, 100.f
7784 };
7785
7786 std::vector<float> expectedOutputValues
7787 {
7788 1.f, 0.5f, 0.25f,
7789 0.2f, 0.125f, 0.1f
7790 };
7791
7792 auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
7793
7794 LayerTestResult<float, 3> result(outputTensorInfo);
7795 result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));
7796
7797 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7798 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7799
7800 armnn::RsqrtQueueDescriptor descriptor;
7801
7802 armnn::WorkloadInfo info;
7803
7804 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7805 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7806
7807 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
7808
7809 inputHandle->Allocate();
7810 outputHandle->Allocate();
7811
7812 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
7813
Derek Lambertif30f7d32019-04-09 10:25:02 +01007814 workload->PostAllocationConfigure();
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007815 workload->Execute();
7816
7817 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
7818
7819 return result;
7820}
7821
7822LayerTestResult<float, 2> RsqrtZeroTest(
7823 armnn::IWorkloadFactory& workloadFactory,
7824 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7825{
7826 const armnn::TensorShape inputShape{ 1, 2 };
7827 const armnn::TensorShape outputShape{ 1, 2 };
7828
7829 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7830 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7831
7832 std::vector<float> inputValues
7833 {
7834 0.f, -0.f
7835 };
7836
7837 std::vector<float> expectedOutputValues
7838 {
7839 INFINITY, -INFINITY
7840 };
7841
7842 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7843 inputTensorInfo, outputTensorInfo,
7844 inputValues, expectedOutputValues);
7845}
7846
7847LayerTestResult<float, 2> RsqrtNegativeTest(
7848 armnn::IWorkloadFactory& workloadFactory,
7849 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7850{
7851 const armnn::TensorShape inputShape{ 1, 2 };
7852 const armnn::TensorShape outputShape{ 1, 2 };
7853
7854 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7855 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7856
7857 std::vector<float> inputValues
7858 {
7859 -25.f, -16.f
7860 };
7861
7862 std::vector<float> expectedOutputValues
7863 {
7864 -NAN, -NAN
7865 };
7866
7867 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7868 inputTensorInfo, outputTensorInfo,
7869 inputValues, expectedOutputValues);
7870}
7871
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007872LayerTestResult<float, 4> BatchNormTest(
7873 armnn::IWorkloadFactory& workloadFactory,
7874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007875{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007876 // BatchSize: 1
7877 // Channels: 2
7878 // Height: 3
7879 // Width: 2
7880
7881 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7882 std::vector<float> inputValues
7883 {
7884 // Batch 0, Channel 0, Height (3) x Width (2)
7885 1.f, 4.f,
7886 4.f, 2.f,
7887 1.f, 6.f,
7888
7889 // Batch 0, Channel 1, Height (3) x Width (2)
7890 1.f, 1.f,
7891 4.f, 1.f,
7892 -2.f, 4.f
7893 };
7894 std::vector<float> expectedOutputValues
7895 {
7896 // Batch 0, Channel 0, Height (3) x Width (2)
7897 1.f, 4.f,
7898 4.f, 2.f,
7899 1.f, 6.f,
7900
7901 // Batch 0, Channel 1, Height (3) x Width (2)
7902 3.f, 3.f,
7903 4.f, 3.f,
7904 2.f, 4.f
7905 };
7906
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007907 return BatchNormTestImpl<armnn::DataType::Float32>(
7908 workloadFactory, memoryManager,
7909 inputOutputShape, inputValues, expectedOutputValues,
7910 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007911}
7912
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007913LayerTestResult<float, 4> BatchNormNhwcTest(
7914 armnn::IWorkloadFactory& workloadFactory,
7915 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007916{
7917 // BatchSize: 1
7918 // Height: 3
7919 // Width: 2
7920 // Channels: 2
7921
7922 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7923 std::vector<float> inputValues
7924 {
7925 // Batch 0, Height 0, Width (2) x Channel (2)
7926 1.f, 1.f,
7927 4.f, 1.f,
7928
7929 // Batch 0, Height 1, Width (2) x Channel (2)
7930 4.f, 4.f,
7931 2.f, 1.f,
7932
7933 // Batch 0, Height 2, Width (2) x Channel (2)
7934 1.f, -2.f,
7935 6.f, 4.f
7936 };
7937 std::vector<float> expectedOutputValues
7938 {
7939 // Batch 0, Height 0, Width (2) x Channel (2)
7940 1.f, 3.f,
7941 4.f, 3.f,
7942
7943 // Batch 0, Height 1, Width (2) x Channel (2)
7944 4.f, 4.f,
7945 2.f, 3.f,
7946
7947 // Batch 0, Height 2, Width (2) x Channel (2)
7948 1.f, 2.f,
7949 6.f, 4.f
7950 };
7951
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007952 return BatchNormTestImpl<armnn::DataType::Float32>(
7953 workloadFactory, memoryManager,
7954 inputOutputShape, inputValues, expectedOutputValues,
7955 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00007956}
7957
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007958LayerTestResult<uint8_t, 4> BatchNormUint8Test(
7959 armnn::IWorkloadFactory& workloadFactory,
7960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007961{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007962 // BatchSize: 1
7963 // Channels: 2
7964 // Height: 3
7965 // Width: 2
7966
7967 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7968 std::vector<float> inputValues
7969 {
7970 // Batch 0, Channel 0, Height (3) x Width (2)
7971 1.f, 4.f,
7972 4.f, 2.f,
7973 1.f, 6.f,
7974
7975 // Batch 0, Channel 1, Height (3) x Width (2)
7976 1.f, 1.f,
7977 4.f, 1.f,
7978 -2.f, 4.f
7979 };
7980 std::vector<float> expectedOutputValues
7981 {
7982 // Batch 0, Channel 0, Height (3) x Width (2)
7983 1.f, 4.f,
7984 4.f, 2.f,
7985 1.f, 6.f,
7986
7987 // Batch 0, Channel 1, Height (3) x Width (2)
7988 3.f, 3.f,
7989 4.f, 3.f,
7990 2.f, 4.f
7991 };
7992
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007993 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
7994 workloadFactory, memoryManager,
7995 inputOutputShape, inputValues, expectedOutputValues,
7996 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007997}
7998
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007999LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8000 armnn::IWorkloadFactory& workloadFactory,
8001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008002{
8003 // BatchSize: 1
8004 // Height: 3
8005 // Width: 2
8006 // Channels: 2
8007
8008 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8009 std::vector<float> inputValues
8010 {
8011 // Batch 0, Height 0, Width (2) x Channel (2)
8012 1.f, 1.f,
8013 4.f, 1.f,
8014
8015 // Batch 0, Height 1, Width (2) x Channel (2)
8016 4.f, 4.f,
8017 2.f, 1.f,
8018
8019 // Batch 0, Height 2, Width (2) x Channel (2)
8020 1.f, -2.f,
8021 6.f, 4.f
8022 };
8023 std::vector<float> expectedOutputValues
8024 {
8025 // Batch 0, Height 0, Width (2) x Channel (2)
8026 1.f, 3.f,
8027 4.f, 3.f,
8028
8029 // Batch 0, Height 1, Width (2) x Channel (2)
8030 4.f, 4.f,
8031 2.f, 3.f,
8032
8033 // Batch 0, Height 2, Width (2) x Channel (2)
8034 1.f, 2.f,
8035 6.f, 4.f
8036 };
8037
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008038 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8039 (workloadFactory, memoryManager,
8040 inputOutputShape, inputValues, expectedOutputValues,
8041 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008042}
8043
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008044LayerTestResult<uint8_t, 4> ConstantUint8Test(
8045 armnn::IWorkloadFactory& workloadFactory,
8046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008047{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008048 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008049}
8050
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008051LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8052 armnn::IWorkloadFactory& workloadFactory,
8053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008054{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008055 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008056}
8057
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008058LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8059 armnn::IWorkloadFactory& workloadFactory,
8060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008061{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008062 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008063}
8064
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008065LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8066 armnn::IWorkloadFactory& workloadFactory,
8067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008068{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008069 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008070}
8071
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008072LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8073 armnn::IWorkloadFactory& workloadFactory,
8074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008075{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008076 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8077 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008078}
8079
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008080LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8081 armnn::IWorkloadFactory& workloadFactory,
8082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008083{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008084 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8085 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008086}
8087
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008088LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8089 armnn::IWorkloadFactory& workloadFactory,
8090 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008091{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008092 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008093}
8094
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008095LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8096 armnn::IWorkloadFactory& workloadFactory,
8097 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008098{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008099 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008100}
8101
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008102LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8103 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008104 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8105 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008106{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008107 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8108 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008109}
8110
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008111LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8112 armnn::IWorkloadFactory& workloadFactory,
8113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008114{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008115 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008116}
8117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008118LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8119 armnn::IWorkloadFactory& workloadFactory,
8120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008121{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008122 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8123 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008124}
8125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008126LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8127 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8129 bool useSubtensor)
8130{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008131 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8132 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008133}
8134
8135LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8136 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008138{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008139 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008140}
8141
8142LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8143 armnn::IWorkloadFactory& workloadFactory,
8144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8145{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008146 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008147}
8148
8149LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8150 armnn::IWorkloadFactory& workloadFactory,
8151 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8152{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008153 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008154}
8155
8156LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8157 armnn::IWorkloadFactory& workloadFactory,
8158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8159{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008160 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8161 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008162}
8163
8164LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8165 armnn::IWorkloadFactory& workloadFactory,
8166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8167{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008168 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8169 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008170}
8171
8172LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8173 armnn::IWorkloadFactory& workloadFactory,
8174 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8175{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008176 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8177 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008178}
8179
8180LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8181 armnn::IWorkloadFactory& workloadFactory,
8182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8183{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008184 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8185 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008186}
8187
8188LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8189 armnn::IWorkloadFactory& workloadFactory,
8190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8191 bool useSubtensor)
8192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008193 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8194 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008195}
8196
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008197LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8198 armnn::IWorkloadFactory& workloadFactory,
8199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8200 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008201{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008202 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8203 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008204}
8205
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008206LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8207 armnn::IWorkloadFactory& workloadFactory,
8208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8209 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008210{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008211 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008212 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008213}
8214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008215LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8216 armnn::IWorkloadFactory& workloadFactory,
8217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8218 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008219{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008220 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8221 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008222}
8223
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008224LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8225 armnn::IWorkloadFactory& workloadFactory,
8226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8227 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008228{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008229 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008230 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008231}
8232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008233LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8234 armnn::IWorkloadFactory& workloadFactory,
8235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008236 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008237{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008238 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008239}
8240
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008241LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8242 armnn::IWorkloadFactory& workloadFactory,
8243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008244 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008245{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008246 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008247}
8248
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008249LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8250 armnn::IWorkloadFactory& workloadFactory,
8251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008252 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008253{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008254 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008255}
8256
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008257LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8258 armnn::IWorkloadFactory& workloadFactory,
8259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008260 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008261{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008262 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008263 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008264}
8265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008266LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8267 armnn::IWorkloadFactory& workloadFactory,
8268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8269 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008270{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008271 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008272 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008273}
8274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008275LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8276 armnn::IWorkloadFactory& workloadFactory,
8277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008278{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008279 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008280}
8281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008282LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8283 armnn::IWorkloadFactory& workloadFactory,
8284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008285{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008286 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8287 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008288}
8289
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008290LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8291 armnn::IWorkloadFactory& workloadFactory,
8292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008293 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008294{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008295 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008296}
8297
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008298LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8299 armnn::IWorkloadFactory& workloadFactory,
8300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008301 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008302{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008303 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008304}
8305
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008306LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8307 armnn::IWorkloadFactory& workloadFactory,
8308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008309{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008310 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008311}
8312
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008313LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8314 armnn::IWorkloadFactory& workloadFactory,
8315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008316{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008317 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008318}
8319
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008320LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8321 armnn::IWorkloadFactory& workloadFactory,
8322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008323{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008324 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008325}
8326
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008327LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8328 armnn::IWorkloadFactory& workloadFactory,
8329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008330{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008331 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008332}
8333
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008334LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8335 armnn::IWorkloadFactory& workloadFactory,
8336 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008337{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008338 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008339}
8340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008341LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8342 armnn::IWorkloadFactory& workloadFactory,
8343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008344{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008345 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008346}
8347
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008348LayerTestResult<float, 4> L2Pooling2dSize7Test(
8349 armnn::IWorkloadFactory& workloadFactory,
8350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008351{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008352 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008353}
8354
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008355LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8356 armnn::IWorkloadFactory& workloadFactory,
8357 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008358{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008359 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008360}
8361
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008362LayerTestResult<float, 4> L2Pooling2dSize9Test(
8363 armnn::IWorkloadFactory& workloadFactory,
8364 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008365{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008366 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008367}
8368
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008369LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8370 armnn::IWorkloadFactory& workloadFactory,
8371 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008372{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008373 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008374}
8375
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008376LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8377 armnn::IWorkloadFactory& workloadFactory,
8378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008380 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008381}
8382
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008383LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8384 armnn::IWorkloadFactory& workloadFactory,
8385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008386{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008387 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008388}
8389
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008390LayerTestResult<float, 4> ComparePooling2dTest(
8391 armnn::IWorkloadFactory& workloadFactory,
8392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8393 armnn::IWorkloadFactory& refWorkloadFactory,
8394 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008395{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008396 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008397 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008398}
8399
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008400LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8401 armnn::IWorkloadFactory& workloadFactory,
8402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8403 armnn::IWorkloadFactory& refWorkloadFactory,
8404 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008405{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008406 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008407 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008408}
8409
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008410LayerTestResult<float, 2> FullyConnectedLargeTest(
8411 armnn::IWorkloadFactory& workloadFactory,
8412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8413 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008414{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008415 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008416}
8417
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008418LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8419 armnn::IWorkloadFactory& workloadFactory,
8420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008421{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008422 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008423}
8424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008425LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8426 armnn::IWorkloadFactory& workloadFactory,
8427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008428{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008429 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8430 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008431}
8432
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008433LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8434 armnn::IWorkloadFactory& workloadFactory,
8435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008436{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008437 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008438}
8439
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008440LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8441 armnn::IWorkloadFactory& workloadFactory,
8442 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008443{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008444 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8445 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008446}
8447
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008448LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8449 armnn::IWorkloadFactory& workloadFactory,
8450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008451{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008452 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008453}
8454
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008455LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8456 armnn::IWorkloadFactory& workloadFactory,
8457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008458{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008459 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8460 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008461}
8462
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008463LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8464 armnn::IWorkloadFactory& workloadFactory,
8465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008466{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008467 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8468 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008469}
8470
8471LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008472 armnn::IWorkloadFactory& workloadFactory,
8473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008474{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008475 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8476 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008477}
8478
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008479LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8480 armnn::IWorkloadFactory& workloadFactory,
8481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008483 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008484}
8485
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008486LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8487 armnn::IWorkloadFactory& workloadFactory,
8488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008489{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008490 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8491 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008492}
8493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008494LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8495 armnn::IWorkloadFactory& workloadFactory,
8496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008497{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008498 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008499}
8500
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008501LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8502 armnn::IWorkloadFactory& workloadFactory,
8503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008504{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008505 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008506}
8507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008508LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8509 armnn::IWorkloadFactory& workloadFactory,
8510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008511{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008512 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008513}
8514
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008515LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8516 armnn::IWorkloadFactory& workloadFactory,
8517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008518{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008519 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008520}
8521
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008522LayerTestResult<float, 4> SimplePermuteFloat32Test(
8523 armnn::IWorkloadFactory& workloadFactory,
8524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008525{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008526 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008527};
8528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008529LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8530 armnn::IWorkloadFactory& workloadFactory,
8531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008532{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008533 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008534};
surmeh01bceff2f2018-03-29 16:29:27 +01008535
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008536LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8537 armnn::IWorkloadFactory& workloadFactory,
8538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008539{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008540 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008541};
8542
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008543LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8544 armnn::IWorkloadFactory& workloadFactory,
8545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008546{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008547 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008548};
8549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008550LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8551 armnn::IWorkloadFactory& workloadFactory,
8552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008553{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008554 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008555};
8556
namespace
{

// Builds, runs, and collects the result of a single Mean workload.
//
// The helper creates input/output tensors from the raw shape arrays, wires
// them into a MeanQueueDescriptor, executes the workload produced by
// `workloadFactory`, and returns a LayerTestResult that carries both the
// actual output (copied back from the output handle) and the expected
// output (built from `outputData`) for the caller to compare.
//
// Template parameters:
//   T         - element type; uint8_t selects QuantisedAsymm8, anything else Float32.
//   InputDim  - rank of the input tensor (length of `inputShape`).
//   OutputDim - rank of the output tensor (length of `outputShape`).
//
// Parameters:
//   memoryManager - accepted but not referenced in this body; presumably kept
//                   for signature symmetry with other helpers — TODO confirm.
//   axis          - reduction axes forwarded to m_Parameters.m_Axis; an empty
//                   vector is passed through unchanged (semantics decided by
//                   the Mean workload, not here).
//   keepDims      - forwarded to m_Parameters.m_KeepDims.
//   scale/offset  - quantisation parameters applied to BOTH input and output
//                   tensor infos; defaults (1.0f, 0) leave values unscaled.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is inferred from T: uint8_t means the quantised path.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantisation parameters on input and output.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // The result object holds the expected output; the actual output is
    // filled in after execution below.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Handles must be allocated before data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008616LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8617 armnn::IWorkloadFactory& workloadFactory,
8618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008619{
8620 const unsigned int inputShape[] = { 3, 2 };
8621 const unsigned int outputShape[] = { 1 };
8622
8623 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8624 std::vector<uint8_t> output({ 2 });
8625
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008626 return MeanTestHelper<uint8_t, 2, 1>(
8627 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008628}
8629
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008630LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8631 armnn::IWorkloadFactory& workloadFactory,
8632 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008633{
8634 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8635 const unsigned int outputShape[] = { 1, 1, 2 };
8636
8637 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8638 std::vector<uint8_t> output({ 2, 2 });
8639
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008640 return MeanTestHelper<uint8_t, 4, 3>(
8641 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008642}
8643
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008644LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8645 armnn::IWorkloadFactory& workloadFactory,
8646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008647{
8648 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8649 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8650
8651 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8652 std::vector<uint8_t> output({ 2, 2 });
8653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008654 return MeanTestHelper<uint8_t, 4, 4>(
8655 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008656}
8657
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008658LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8659 armnn::IWorkloadFactory& workloadFactory,
8660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008661{
8662 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8663 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8664
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008665 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008666 std::vector<uint8_t> output({ 1, 3, 5 });
8667
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008668 return MeanTestHelper<uint8_t, 4, 4>(
8669 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008670}
8671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008672LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8673 armnn::IWorkloadFactory& workloadFactory,
8674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008675{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008676 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008677 const unsigned int outputShape[] = { 2 };
8678
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008679 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8680 24 });
8681 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008682
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008683 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8684 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008685 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008686}
8687
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008688LayerTestResult<float, 1> MeanFloatSimpleTest(
8689 armnn::IWorkloadFactory& workloadFactory,
8690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008691{
8692 const unsigned int inputShape[] = { 3, 2 };
8693 const unsigned int outputShape[] = { 1 };
8694
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008695 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8696 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008697
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008698 return MeanTestHelper<float, 2, 1>(
8699 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008700}
8701
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008702LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8703 armnn::IWorkloadFactory& workloadFactory,
8704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008705{
8706 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8707 const unsigned int outputShape[] = { 3, 1, 2 };
8708
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008709 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8710 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008711
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008712 return MeanTestHelper<float, 4, 3>(
8713 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008714}
8715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008716LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8717 armnn::IWorkloadFactory& workloadFactory,
8718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008719{
8720 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8721 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8722
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008723 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8724 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008726 return MeanTestHelper<float, 4, 4>(
8727 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008728}
8729
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008730LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8731 armnn::IWorkloadFactory& workloadFactory,
8732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008733{
8734 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8735 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8736
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008737 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8738 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008739
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008740 return MeanTestHelper<float, 4, 4>(
8741 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008742}
8743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008744LayerTestResult<float, 1> MeanVtsFloat1Test(
8745 armnn::IWorkloadFactory& workloadFactory,
8746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008747{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008748 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008749 const unsigned int outputShape[] = { 2 };
8750
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008751 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8752 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8753 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008754
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008755 return MeanTestHelper<float, 3, 1>(
8756 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008757}
8758
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008759LayerTestResult<float, 3> MeanVtsFloat2Test(
8760 armnn::IWorkloadFactory& workloadFactory,
8761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008762{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008763 const unsigned int inputShape[] = { 4, 3, 2 };
8764 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008765
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008766 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8767 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8768 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008769
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008770 return MeanTestHelper<float, 3, 3>(
8771 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008772}
8773
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008774LayerTestResult<float, 3> MeanVtsFloat3Test(
8775 armnn::IWorkloadFactory& workloadFactory,
8776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008777{
8778 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8779 const unsigned int outputShape[] = { 1, 2, 1 };
8780
8781 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8782 std::vector<float> output({ 1.5f, 3.5f });
8783
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008784 return MeanTestHelper<float, 4, 3>(
8785 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008786}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008787
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Chains two workloads: a 1x1 MaxPool (stride 2) followed by an Addition
    // whose first input is the pooling output handle. Verifies the final sum.
    //
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload (not executed until later).
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Host-side scratch buffer shaped like the pooling output; used below for
    // a round-trip copy through poolingOutputHandle.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28,
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle BEFORE the pooling workload
    // has executed, then immediately writes the same bytes back. It appears to
    // be a no-op round trip (possibly exercising handle map/unmap) — confirm
    // intent before simplifying.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run the MaxPool first, then the Addition that consumes its output handle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008892
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008893LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
8894 armnn::IWorkloadFactory& workloadFactory,
8895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008896{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008897 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008898}
8899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008900LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
8901 armnn::IWorkloadFactory& workloadFactory,
8902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008903{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008904 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008905}
8906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008907LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
8908 armnn::IWorkloadFactory& workloadFactory,
8909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008910{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008911 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008912}
8913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
8915 armnn::IWorkloadFactory& workloadFactory,
8916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008917{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008918 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008919}
8920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008921LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
8922 armnn::IWorkloadFactory& workloadFactory,
8923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008925 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008926}
8927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008928LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
8929 armnn::IWorkloadFactory& workloadFactory,
8930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008931{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008932 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008933}
8934
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008935LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
8936 armnn::IWorkloadFactory& workloadFactory,
8937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008938{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008939 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008940}
8941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008942LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
8943 armnn::IWorkloadFactory& workloadFactory,
8944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008945{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008946 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008947}
8948
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008949LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
8950 armnn::IWorkloadFactory& workloadFactory,
8951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008952{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008953 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008954}
8955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008956LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
8957 armnn::IWorkloadFactory& workloadFactory,
8958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008959{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008960 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008961}
8962
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008963LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
8964 armnn::IWorkloadFactory& workloadFactory,
8965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008966{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008967 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008968}
8969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008970LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
8971 armnn::IWorkloadFactory& workloadFactory,
8972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008973{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008974 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008975}
8976
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008977LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
8978 armnn::IWorkloadFactory& workloadFactory,
8979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008980{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008981 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008982}
8983
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008984LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
8985 armnn::IWorkloadFactory& workloadFactory,
8986 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008987{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008988 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008989}
8990
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008991LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
8992 armnn::IWorkloadFactory& workloadFactory,
8993 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008994{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008995 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008996}
8997
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008998LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
8999 armnn::IWorkloadFactory& workloadFactory,
9000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009001{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009002 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009003}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009004
namespace {

// Shared driver for the BatchToSpaceNd layer tests below.
//
// Builds input/output TensorInfos from the raw shape arrays, runs one
// BatchToSpaceNd workload through the supplied factory, and returns a
// LayerTestResult holding both the actual and the expected output.
//
// T is uint8_t for QuantisedAsymm8 tests; any other element type maps to
// Float32. scale/offset are the quantization parameters applied identically
// to the input and output tensors.
// NOTE(review): memoryManager is not referenced in this helper; it appears to
// be kept so all layer-test entry points share the same signature — confirm.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Select the armnn data type from the element type.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization parameters on both tensors.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: layout, block shape and per-dimension crops.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
9064
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchToSpaceNd, NHWC, Float32: block shape 2x2, no cropping.
    // Rearranges 4 batches of 2x2x1 into a single 4x4x1 image.
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // Single batch: the values 1..16 laid out row by row.
    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9110
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchToSpaceNd, NHWC, Float32: four 1x1x1 batches merged into one 2x2x1 image.
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input({
        // One value per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009133LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
9134 armnn::IWorkloadFactory& workloadFactory,
9135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009136{
9137 const unsigned int inputShape[] = {4, 1, 1, 3};
9138 const unsigned int outputShape[] = {1, 2, 2, 3};
9139
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009140 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009141
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009142 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009143
9144 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009145 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009146
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009147 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9148 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9149 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009150}
9151
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009152LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
9153 armnn::IWorkloadFactory& workloadFactory,
9154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9155{
9156 const unsigned int inputShape[] = {8, 1, 3, 1};
9157 const unsigned int outputShape[] = {2, 2, 4, 1};
9158
9159 std::vector<float> input({
9160 0.0f, 1.0f, 3.0f,
9161 0.0f, 9.0f, 11.0f,
9162 0.0f, 2.0f, 4.0f,
9163 0.0f, 10.0f, 12.0f,
9164 0.0f, 5.0f, 7.0f,
9165 0.0f, 13.0f, 15.0f,
9166 0.0f, 6.0f, 8.0f,
9167 0.0f, 14.0f, 16.0f
9168 });
9169
9170 std::vector<float> expectedOutput({
9171 1.0f, 2.0f, 3.0f, 4.0f,
9172 5.0f, 6.0f, 7.0f, 8.0f,
9173 9.0f, 10.0f, 11.0f, 12.0f,
9174 13.0f, 14.0f, 15.0f, 16.0f
9175 });
9176
9177 std::vector<unsigned int> blockShape({2, 2});
9178 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9179
9180 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9181 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9182 crops, outputShape, expectedOutput);
9183}
9184
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009185LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
9186 armnn::IWorkloadFactory &workloadFactory,
9187 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009188{
9189 const unsigned int inputShape[] = {4, 3, 1, 1};
9190 const unsigned int outputShape[] = {1, 3, 2, 2};
9191
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009192 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009193
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009194 std::vector<float> expectedOutput({
9195 // Batch 0, Channel 0, Height (2) x Width (2)
9196 1.0f, 4.0f,
9197 7.0f, 10.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009198
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009199 // Batch 0, Channel 1, Height (2) x Width (2)
9200 2.0f, 5.0f,
9201 8.0f, 11.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009202
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009203 // Batch 0, Channel 2, Height (2) x Width (2)
9204 3.0f, 6.0f,
9205 9.0f, 12.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009206 });
9207
9208 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009209 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009211 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9212 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9213 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009214}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009215
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchToSpaceNd, NCHW, Float32: four 1x1x1 batches merged into one
    // 1-channel 2x2 image.
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One value per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9237
9238LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009239 armnn::IWorkloadFactory& workloadFactory,
9240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009241{
9242 const unsigned int inputShape[] = {4, 3, 1, 1};
9243 const unsigned int outputShape[] = {1, 3, 2, 2};
9244
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009245 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009246
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009247 std::vector<float> expectedOutput({
9248 // Batch 0, Channel 0, Height (2) x Width (2)
9249 1.0f, 7.0f,
9250 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009251
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009252 // Batch 0, Channel 1, Height (2) x Width (2)
9253 3.0f, 9.0f,
9254 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009255
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009256 // Batch 0, Channel 2, Height (2) x Width (2)
9257 5.0f, 11.0f,
9258 6.0f, 12.0f,
9259 });
Mike Kelly831faed2018-11-28 11:52:08 +00009260
9261 std::vector<unsigned int> blockShape({2, 2});
9262 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9263
9264 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9265 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9266 crops, outputShape, expectedOutput);
9267}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009268
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009269LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9270 armnn::IWorkloadFactory& workloadFactory,
9271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009272{
9273 const unsigned int inputShape[] = {4, 2, 2, 1};
9274 const unsigned int outputShape[] = {1, 4, 4, 1};
9275
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009276 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9277 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009278
9279 std::vector<unsigned int> blockShape({2, 2});
9280 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9281
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009282 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9283 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009284}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009285
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchToSpaceNd, NHWC, QuantisedAsymm8: four 1x1x1 batches merged into
    // one 2x2x1 image.
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<uint8_t> input({
        // One value per batch (4 batches of 1x1x1).
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9307
9308LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9309 armnn::IWorkloadFactory& workloadFactory,
9310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9311{
9312 const unsigned int inputShape[] = {4, 1, 1, 3};
9313 const unsigned int outputShape[] = {1, 2, 2, 3};
9314
9315 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9316
9317 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9318
9319 std::vector<unsigned int> blockShape({2, 2});
9320 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9321
9322 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9323 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9324 crops, outputShape, expectedOutput);
9325}
9326
9327
9328LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
9329 armnn::IWorkloadFactory &workloadFactory,
9330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9331{
9332 const unsigned int inputShape[] = {4, 3, 1, 1};
9333 const unsigned int outputShape[] = {1, 3, 2, 2};
9334
9335 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9336
9337 std::vector<uint8_t> expectedOutput({
9338 // Batch 0, Channel 0, Height (2) x Width (2)
9339 1, 4,
9340 7, 10,
9341
9342 // Batch 0, Channel 1, Height (2) x Width (2)
9343 2, 5,
9344 8, 11,
9345
9346 // Batch 0, Channel 2, Height (2) x Width (2)
9347 3, 6,
9348 9, 12,
9349 });
9350
9351 std::vector<unsigned int> blockShape({2, 2});
9352 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9353
9354 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9355 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9356 crops, outputShape, expectedOutput);
9357}
9358
9359LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
9360 armnn::IWorkloadFactory& workloadFactory,
9361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9362{
9363 const unsigned int inputShape[] = {4, 1, 1, 1};
9364 const unsigned int outputShape[] = {1, 1, 2, 2};
9365
9366 std::vector<uint8_t> input({
9367 // Batch 0, Height 0, Width (2) x Channel (1)
9368 1, 2, 3, 4
9369 });
9370
9371 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9372
9373 std::vector<unsigned int> blockShape({2, 2});
9374 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9375
9376 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9377 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9378 crops, outputShape, expectedOutput);
9379}
9380
9381LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9382 armnn::IWorkloadFactory& workloadFactory,
9383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9384{
9385 const unsigned int inputShape[] = {4, 3, 1, 1};
9386 const unsigned int outputShape[] = {1, 3, 2, 2};
9387
9388 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9389
9390 std::vector<uint8_t> expectedOutput({
9391 // Batch 0, Channel 0, Height (2) x Width (2)
9392 1, 7,
9393 2, 8,
9394
9395 // Batch 0, Channel 1, Height (2) x Width (2)
9396 3, 9,
9397 4, 10,
9398
9399 // Batch 0, Channel 2, Height (2) x Width (2)
9400 5, 11,
9401 6, 12,
9402 });
9403
9404 std::vector<unsigned int> blockShape({2, 2});
9405 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9406
9407 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9408 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9409 crops, outputShape, expectedOutput);
9410}
9411
9412LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
9413 armnn::IWorkloadFactory& workloadFactory,
9414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9415{
9416 const unsigned int inputShape[] = {8, 1, 1, 3};
9417 const unsigned int outputShape[] = {2, 1, 2, 4};
9418
9419 std::vector<uint8_t> input({
9420 0, 1, 3, 0, 9, 11,
9421 0, 2, 4, 0, 10, 12,
9422 0, 5, 7, 0, 13, 15,
9423 0, 6, 8, 0, 14, 16
9424 });
9425
9426 std::vector<uint8_t> expectedOutput({
9427 1, 2, 3, 4,
9428 5, 6, 7, 8,
9429 9, 10, 11, 12,
9430 13, 14, 15, 16
9431 });
9432
9433 std::vector<unsigned int> blockShape({2, 2});
9434 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9435
9436 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9437 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9438 crops, outputShape, expectedOutput);
9439}
9440
// Forwarding wrapper: runs the shared 4D StridedSlice reference test instantiated for Float32.
LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9447
// Forwarding wrapper: 4D StridedSlice with negative (reversing) strides, Float32.
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9454
// Forwarding wrapper: StridedSlice with simple (non-unit) strides, Float32.
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9461
// Forwarding wrapper: StridedSlice exercising begin/end range masks, Float32.
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9468
// Forwarding wrapper: StridedSlice with shrink-axis mask (rank-reducing), Float32.
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9475
// Forwarding wrapper: 3D StridedSlice reference test, Float32.
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9482
// Forwarding wrapper: 3D StridedSlice with reversing strides, Float32.
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9489
// Forwarding wrapper: 2D StridedSlice reference test, Float32.
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9496
// Forwarding wrapper: 2D StridedSlice with reversing strides, Float32.
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9503
// Forwarding wrapper: 4D StridedSlice reference test, quantised asymmetric uint8.
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9510
// Forwarding wrapper: 4D StridedSlice with reversing strides, quantised uint8.
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9517
// Forwarding wrapper: StridedSlice with simple strides, quantised uint8.
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9524
// Forwarding wrapper: StridedSlice exercising begin/end range masks, quantised uint8.
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9531
// Forwarding wrapper: StridedSlice with shrink-axis mask (rank-reducing), quantised uint8.
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9538
// Forwarding wrapper: 3D StridedSlice reference test, quantised uint8.
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9545
// Forwarding wrapper: 3D StridedSlice with reversing strides, quantised uint8.
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9552
// Forwarding wrapper: 2D StridedSlice reference test, quantised uint8.
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9559
// Forwarding wrapper: 2D StridedSlice with reversing strides, quantised uint8.
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009566
// Forwarding wrapper: Debug layer pass-through test on a 4D Float32 tensor.
LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9573
// Forwarding wrapper: Debug layer pass-through test on a 3D Float32 tensor.
LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9580
// Forwarding wrapper: Debug layer pass-through test on a 2D Float32 tensor.
LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9587
// Forwarding wrapper: Debug layer pass-through test on a 1D Float32 tensor.
LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9594
// Forwarding wrapper: Debug layer pass-through test on a 4D quantised uint8 tensor.
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9601
// Forwarding wrapper: Debug layer pass-through test on a 3D quantised uint8 tensor.
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9608
// Forwarding wrapper: Debug layer pass-through test on a 2D quantised uint8 tensor.
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9615
// Forwarding wrapper: Debug layer pass-through test on a 1D quantised uint8 tensor.
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00009622
// Forwarding wrapper: Gather with a 1D params tensor, Float32.
LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9629
// Forwarding wrapper: Gather with a 1D params tensor, quantised uint8.
LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9636
// Forwarding wrapper: Gather with a multi-dimensional params tensor, Float32.
LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9643
// Forwarding wrapper: Gather with a multi-dimensional params tensor, quantised uint8.
LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9650
// Forwarding wrapper: Gather with multi-dimensional params AND indices, Float32.
LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9657
// Forwarding wrapper: Gather with multi-dimensional params AND indices, quantised uint8.
LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009665
// Forwarding wrapper: Dequantize from asymmetric uint8 to Float32, zero offset.
LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009672
// Forwarding wrapper: Dequantize from asymmetric uint8 with a non-zero quantization offset.
LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9679
// Forwarding wrapper: Dequantize from symmetric int16 to Float32.
LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9686
// Forwarding wrapper: Quantize Float32 to asymmetric uint8, in-range values.
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9693
// Forwarding wrapper: Quantize to asymmetric uint8 with values clamped to the type's range.
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9700
// Forwarding wrapper: Quantize to symmetric int16 with values clamped to the type's range.
LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}