blob: fb07f9fe0b8ad3a33d3aaecc49688c885fef95b2 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000036#include "SplitterTestImpl.hpp"
37#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000038#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000039#include "NormTestImpl.hpp"
40#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000047
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Laid out channel-major: 8 rows of 16 values per channel.
// Channel 0: rows of 0.5f, except the second row which is all zeros.
// Channel 1: a single vertical line of 1s in column 2, zeros elsewhere.
// Channel 2: every element is -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
75
// 2-channel bias used by a number of Conv2d tests (channel 0 bias = 0, channel 1 bias = 2).
static std::vector<float> Bias2({0, 2});
78
telsoa01c577f2c2018-08-31 09:22:23 +010079// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000080template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010081boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000082{
83 if(biasEnabled)
84 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000085 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010086 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000087 return bias;
88 }
89 else
90 {
91 return boost::multi_array<T, 1>();
92 }
93}
94
// Shared implementation for the SimpleConvolution2d3x5* tests: convolves the common
// single-batch, 3-channel 16x8 input (ConvInput3x8x16) with a 2-element batch of
// 3-channel 3x5 kernels and checks the result against precomputed reference values.
//
// qScale/qOffset - quantization parameters applied to input, kernel and expected output
//                  (pass 0.f / 0 for float data types).
// biasEnabled    - when true, the shared Bias2 bias is applied; it is quantized at
//                  qScale * qScale (presumably input-scale * weight-scale — the usual
//                  convention for conv bias scale).
// layout         - data layout (e.g. NCHW/NHWC) forwarded to the implementation.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0: responds to channel 0 (box with a -1 centre) ...
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // ... ignores channel 1 ...
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // ... and doubles channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1: only sums channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
178
// Shared implementation for the SimpleConvolution2d3x3* tests. Same shape of test as
// SimpleConvolution2d3x5TestCommon but with 3x3 kernels, which exercises ArmCompute's
// direct convolution path.
//
// qScale/qOffset - quantization parameters applied to input, kernel and expected output.
// biasEnabled    - when true, the shared Bias2 bias (quantized at qScale * qScale) is used.
// layout         - data layout forwarded to the implementation.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0: box filter (with -1 centre) on channel 0 ...
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // ... ignores channel 1 ...
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // ... and doubles channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1: only sums channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
255
// NHWC variant of the simple 3x3 convolution test: a single-batch, single-channel 3x4
// input is convolved with one 3x3 kernel and compared against precomputed output.
// Note: the raw data is NOT quantized here — qScale/qOffset are only forwarded to the
// test impl.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a single-batch, single-channel 3x4 image (NHWC shape {1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });


    // Use a single 3x3 kernel with one input and one output channel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a single-channel 3x4 image (same spatial size as the
    // input, so the impl's default padding presumably keeps the shape — see
    // SimpleConvolution2dNhwcTestImpl defaults).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // NOTE(review): biasEnabled is accepted but never used — an empty bias tensor is
    // always passed below, so bias is effectively disabled in this test.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
307
// NHWC 3x3 convolution with stride 2x2 and symmetric 1-pixel padding over a
// single-batch, single-channel 5x5 input, producing a 3x3 output.
// Note: the raw data is NOT quantized here — qScale/qOffset are only forwarded to the
// test impl. biasEnabled is accepted but unused (an empty bias is always passed).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric 1-pixel padding, stride 2 in both dimensions: 5x5 -> 3x3.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000374LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
375 armnn::IWorkloadFactory& workloadFactory,
376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
377 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000378 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000380 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
381 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000382}
383
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000384LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
385 armnn::IWorkloadFactory& workloadFactory,
386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
387 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000388 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000389{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000390 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
391 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000392}
393
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000394LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
395 armnn::IWorkloadFactory& workloadFactory,
396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
397 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000398 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000399{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000400 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
401 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000402}
403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000404LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
405 armnn::IWorkloadFactory& workloadFactory,
406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
407 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100408{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000409 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
410 workloadFactory,
411 memoryManager,
412 0.f,
413 0,
414 biasEnabled,
415 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100416}
417
Mike Kelly7332ed82018-12-20 17:03:06 +0000418LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
419 armnn::IWorkloadFactory& workloadFactory,
420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
421 bool biasEnabled,
422 const armnn::DataLayout layout)
423{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000424 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
425 workloadFactory,
426 memoryManager,
427 0.f,
428 0,
429 biasEnabled,
430 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000431}
432
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000433LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
434 armnn::IWorkloadFactory& workloadFactory,
435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
436 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000437 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000438{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000439 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
440 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000441}
442
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100443LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
444 armnn::IWorkloadFactory& workloadFactory,
445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
446 bool biasEnabled,
447 const armnn::DataLayout layout)
448{
449return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
450 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
451}
452
453LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
454 armnn::IWorkloadFactory& workloadFactory,
455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
456 bool biasEnabled,
457 const armnn::DataLayout layout)
458{
459 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
460 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
461}
462
// Exercises Conv2d with padding larger than half the kernel size: a 2x2 kernel with
// padding left=1, top=2, right=3, bottom=4 on a single-batch 1-channel 3x3 input.
// qScale/qOffset quantize the input, kernel and expected output; bias is disabled.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        })));

    // Bias disabled: GetBias2(false, ...) yields an empty bias tensor.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
526
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000527template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
528 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000529LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
530 armnn::IWorkloadFactory& workloadFactory,
531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000532 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000533 float qScale,
534 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000535{
telsoa01c577f2c2018-08-31 09:22:23 +0100536 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000537 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000538 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
539 QuantizedVector<T>(qScale, qOffset, {
540 11,21,31,41,51,
541 12,22,32,42,52,
542 13,23,33,43,53,
543 14,24,34,44,54,
544 15,25,35,45,55,
545 })));
546
telsoa01c577f2c2018-08-31 09:22:23 +0100547 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000548 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000549 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
550 QuantizedVector<T>(qScale, qOffset, {
551 -11,-21,-31,-41,
552 -12,-22,-32,-42,
553 -13,-23,-33,-43,
554 -14,-24,-34,-44,
555 })));
556
telsoa01c577f2c2018-08-31 09:22:23 +0100557 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000559 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
560 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
561 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000562 -7140, -10580, -13940, -9300, -5230,
563 -9590, -14120, -18520, -12290, -6860,
564 -9980, -14560, -18960, -12560, -7000,
565 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100566 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000567 })));
568
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000569 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
570 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000571 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000572 input,
573 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100574 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000575 expectedOutput,
576 qScale,
577 qOffset,
narpra015f703182018-10-26 16:24:58 +0100578 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100579 1, // Padding left.
580 1, // Padding top.
581 2, // Padding right.
582 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100583}
584
// Tests DepthwiseConvolution2d with asymmetric padding (left=1, top=1, right=2, bottom=2),
// depth multiplier 1 and stride 1x1 on a single-batch 2-channel 5x5 input with a
// 2-channel 4x4 kernel. Expected values were computed with the python tensorflow library.
//
// NOTE(review): the input/kernel/output vectors are quantized with the tensor infos' own
// quantization parameters (defaults, since none are set on construction) rather than with
// qScale/qOffset, which are only forwarded to the test impl — confirm this is intentional.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
661
// Shared body for the NHWC depthwise-convolution tests.
// Builds a fixed 1x5x5x2 (NHWC) input, a 1x2x4x4 kernel and the matching
// pre-computed expected output, then delegates to
// DepthwiseConvolution2dNhwcTestImpl with asymmetric padding
// (left/top = 1, right/bottom = 2) and stride 1x1.
// qScale/qOffset quantise the test data for the quantised variants of ArmnnType;
// ArmnnBType is the bias data type (e.g. Signed32 for quantised runs).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: N=1, H=5, W=5, C=2. Each line below is one (H,W) position holding
    // both channel values: channel 0 counts 0..24, channel 1 counts 25..49.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Kernel: one 4x4 filter per input channel (descending values 32..17 and 16..1).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Pre-computed reference output (same 1x5x5x2 NHWC shape as the input,
    // preserved by the asymmetric padding below).
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    // Bias scale is qScale * qScale (input scale times weight scale) — the
    // convention GetBias2 expects for quantised biases.
    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
769
// Shared body for the dilated (3x3 dilation) depthwise-convolution NHWC test.
// A 9x9 single-channel input containing a 3x3 block of ones is convolved with a
// 3x3 kernel using dilation 3 and no padding, which shrinks the output to 3x3.
// qScale/qOffset quantise the test data for the quantised variants of ArmnnType.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: N=1, H=9, W=9, C=1 — all zeros except a centred 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Single 3x3 kernel with values 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        })));

    // No padding, unit stride; the dilation of 3 spreads the 3x3 kernel over a
    // 7x7 receptive field.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    // (Each output position sees exactly one of the ones through the centre tap, weight 5.)
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            5, 5, 5,
            5, 5, 5,
            5, 5, 5
        })));

    // Bias scale follows the qScale * qScale convention expected by GetBias2.
    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);

}
838
telsoa014fcda012018-03-09 14:13:49 +0000839LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000840Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
841 armnn::IWorkloadFactory& workloadFactory,
842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000843 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000844{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000845 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
846 <armnn::DataType::Float32, armnn::DataType::Float32>(
847 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000848}
849
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
851 armnn::IWorkloadFactory& workloadFactory,
852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000853 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000854{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000855 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000856 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000857}
858
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000859LayerTestResult<float, 4> DepthwiseConvolution2dTest(
860 armnn::IWorkloadFactory& workloadFactory,
861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
862 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000863 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000864{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000865 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000866 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000867}
868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000869LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
870 armnn::IWorkloadFactory& workloadFactory,
871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
872 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100873{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000874 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
875 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100876}
877
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000878LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
879 armnn::IWorkloadFactory& workloadFactory,
880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
881 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000882 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000883{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000884 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000885 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000886}
887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000888LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
889 armnn::IWorkloadFactory& workloadFactory,
890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
891 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000892 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100893{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000894 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000895 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100896}
897
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000898LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
899 armnn::IWorkloadFactory& workloadFactory,
900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
901 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000902 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000903{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000904 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000905 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000906}
907
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000908LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
909 armnn::IWorkloadFactory& workloadFactory,
910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
911 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000912 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000913{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000914 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000915 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000916}
917
Bruno Goncalves22972f02019-04-26 21:03:24 -0300918LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
921{
922 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
923 workloadFactory,
924 memoryManager,
925 0.f,
926 0,
927 false);
928}
929
Ruomei Yan88d44b82019-05-23 14:29:06 +0100930LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
931 armnn::IWorkloadFactory& workloadFactory,
932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
933 bool biasEnabled,
934 const armnn::DataLayout layout)
935{
936 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
937 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
938}
939
940LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
941 armnn::IWorkloadFactory& workloadFactory,
942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
943 bool biasEnabled,
944 const armnn::DataLayout layout)
945{
946 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
947 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
948}
949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000950LayerTestResult<float, 4> Convolution1dTest(
951 armnn::IWorkloadFactory& workloadFactory,
952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
953 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000954{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000955 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
956 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000957}
958
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000959LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
960 armnn::IWorkloadFactory& workloadFactory,
961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
962 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000963{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000964 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
965 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000966}
967
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000968LayerTestResult<float,4> CompareConvolution2dTest(
969 armnn::IWorkloadFactory& workloadFactory,
970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
971 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000972{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000973 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
974 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000975}
976
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000977LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000978 armnn::IWorkloadFactory& workloadFactory,
979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
980 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000981 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000982{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000983 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
984 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000985}
986
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000987LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
988 armnn::IWorkloadFactory& workloadFactory,
989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
990 armnn::IWorkloadFactory& refWorkloadFactory,
991 const armnn::DataLayout layout)
992{
993 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
994 workloadFactory, memoryManager, refWorkloadFactory, layout);
995}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000996
997LayerTestResult<float,4> SimpleNormalizationAcrossTest(
998 armnn::IWorkloadFactory& workloadFactory,
999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001000{
1001 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1002 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001003 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001004}
1005
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001006LayerTestResult<float,4> SimpleNormalizationWithinTest(
1007 armnn::IWorkloadFactory& workloadFactory,
1008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001009{
1010 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1011 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001012 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001013}
1014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001015LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1016 armnn::IWorkloadFactory& workloadFactory,
1017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001018{
1019 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1020 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001021 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001022}
1023
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001024LayerTestResult<float,2> SimpleSoftmaxTest(
1025 armnn::IWorkloadFactory& workloadFactory,
1026 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1027 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001028{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001029 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001030}
1031
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001032LayerTestResult<float,3> Simple3dSoftmaxTest(
1033 armnn::IWorkloadFactory& workloadFactory,
1034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1035 float beta)
1036{
1037 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1038}
1039
1040LayerTestResult<float,4> Simple4dSoftmaxTest(
1041 armnn::IWorkloadFactory& workloadFactory,
1042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1043 float beta)
1044{
1045 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1046}
1047
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001048LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1049 armnn::IWorkloadFactory& workloadFactory,
1050 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1051 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001052{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001053 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001054}
1055
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001056LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1057 armnn::IWorkloadFactory& workloadFactory,
1058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1059 float beta)
1060{
1061 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1062}
1063
1064LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1065 armnn::IWorkloadFactory& workloadFactory,
1066 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1067 float beta)
1068{
1069 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1070}
1071
nikraj01248683f2019-05-29 16:46:50 +01001072LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1073 armnn::IWorkloadFactory& workloadFactory,
1074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1075 float beta)
1076{
1077 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1078}
1079
1080LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1081 armnn::IWorkloadFactory& workloadFactory,
1082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1083 float beta)
1084{
1085 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1086}
1087
1088LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1089 armnn::IWorkloadFactory& workloadFactory,
1090 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1091 float beta)
1092{
1093 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1094}
1095
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001096LayerTestResult<float,4> CompareNormalizationTest(
1097 armnn::IWorkloadFactory& workloadFactory,
1098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1099 armnn::IWorkloadFactory& refWorkloadFactory,
1100 armnn::NormalizationAlgorithmChannel normChannel,
1101 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001102{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001103 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001104}
1105
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001106LayerTestResult<float,2> CompareSoftmaxTest(
1107 armnn::IWorkloadFactory& workloadFactory,
1108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001109 armnn::IWorkloadFactory& refWorkloadFactory,
1110 float beta)
1111{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001112 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1113 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001114}
1115
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001116LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1117 armnn::IWorkloadFactory& workloadFactory,
1118 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001119 armnn::IWorkloadFactory& refWorkloadFactory,
1120 float beta)
1121{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001122 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1123 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001124}
1125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001126std::vector<LayerTestResult<float,3>> SplitterTest(
1127 armnn::IWorkloadFactory& workloadFactory,
1128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001129{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001130 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001131}
1132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001133std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1134 armnn::IWorkloadFactory& workloadFactory,
1135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001136{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001137 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001138}
1139
Ruomei Yan25339c32019-05-28 16:48:20 +01001140std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1141 armnn::IWorkloadFactory& workloadFactory,
1142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1143{
1144 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1145}
1146
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001147LayerTestResult<float, 3> CopyViaSplitterTest(
1148 armnn::IWorkloadFactory& workloadFactory,
1149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001150{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001151 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001152}
1153
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001154LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1155 armnn::IWorkloadFactory& workloadFactory,
1156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001157{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001158 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001159}
1160
Ruomei Yan25339c32019-05-28 16:48:20 +01001161LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1162 armnn::IWorkloadFactory& workloadFactory,
1163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1164{
1165 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1166}
1167
// Float32 LSTM test: CIFG + peephole enabled, no projection.
// Batch of 2, input size 2; golden output has 4 units per batch entry.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Pre-computed reference output for the fixed weights inside the impl.
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1183
// Float32 LSTM test: peephole and projection enabled, CIFG disabled.
// Batch of 2, input size 5; projected output has 16 units per batch entry.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Pre-computed reference output for the fixed weights inside the impl.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1205
// Float32 LSTM test: basic configuration — no CIFG, no peephole, no projection.
// Batch of 2, input size 2; golden output has 4 units per batch entry.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));

    // Pre-computed reference output for the fixed weights inside the impl.
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1223
// QSymm16 LSTM test: basic configuration — no CIFG, no peephole, no projection.
// Activations are QuantisedSymm16 while the constant weights are QuantisedAsymm8.
// Same input/golden values as the Float32 variant, quantised with scale 1, offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>{2., 3., 3., 4.}));

    // Pre-computed reference output, quantised from the Float32 golden values.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
1247
// QSymm16 LSTM test: CIFG + peephole enabled, no projection.
// Activations are QuantisedSymm16 while the constant weights are QuantisedAsymm8.
// Same input/golden values as the Float32 variant, quantised with scale 1, offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>({ 2., 3., 3., 4. })));

    // Pre-computed reference output, quantised from the Float32 golden values.
    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1271
// QSymm16 LSTM test: peephole and projection enabled, CIFG disabled.
// Activations are QuantisedSymm16 while the constant weights are QuantisedAsymm8.
// Same input/golden values as the Float32 variant, quantised with scale 2, offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Pre-computed reference output, quantised from the Float32 golden values.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1301
1302LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1303 armnn::IWorkloadFactory& workloadFactory,
1304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1305{
1306 const float qScale = 1.0f;
1307 const int32_t qOffset = 0;
1308
1309 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1310
1311 armnn::TensorInfo inputDesc({2, 2}, datatype);
1312 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1313 qOffset, std::vector<float>{2., 3., 3., 4.}));
1314
1315 armnn::TensorInfo outputDesc({2, 4}, datatype);
1316 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1317 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1318 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1319
1320 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1321 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1322}
1323
// Concatenates a 2-channel and a 1-channel float tensor (both 6x3) along the
// channel axis into a 3-channel output, then returns actual vs. expected
// results. When the backend supports sub-tensors, the inputs are created as
// views directly into the output tensor's memory.
// NOTE(review): memoryManager is unused here — kept for a uniform test signature.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output shape: { channels, height, width } = { 3, 6, 3 }.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input fills channels 0-1 of the output.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input fills channel 2 of the output.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If sub-tensors are supported, make each input a view into the output at
    // its window origin; otherwise allocate standalone input tensors.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    // Inputs are added in order; their index in the descriptor must match the
    // view origins pushed below.
    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1446
// Element-wise addition of two {2,2,2,3} float tensors; returns the actual
// backend output alongside the hand-computed expected sums.
// NOTE(review): memoryManager is unused here — kept for a uniform test signature.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape (no broadcasting).
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output is the element-wise sum input1 + input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1538
// Shared implementation for broadcast-addition tests: adds a {1,3,2,1}
// tensor to a {1,1,2,3} tensor, both broadcast to the {1,3,2,3} output.
// qScale/qOffset are applied only for quantized ArmnnType instantiations.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only matter for quantized element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected result: each input1 value added to every row of input2.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1617
// Shared implementation for scalar-broadcast addition tests: adds a single
// {1,1,1,1} value (0.5f) to every element of a {1,3,2,3} tensor.
// qScale/qOffset are applied only for quantized ArmnnType instantiations.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only matter for quantized element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected result: input1 with 0.5f added to every element.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1691
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001692LayerTestResult<float, 4> AdditionBroadcastTest(
1693 armnn::IWorkloadFactory& workloadFactory,
1694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001695{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001696 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1697 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001698}
1699
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001700LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1701 armnn::IWorkloadFactory& workloadFactory,
1702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001703{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001704 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1705 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001706}
1707
Sadik Armagan2999a022019-04-09 14:20:12 +01001708LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1709 armnn::IWorkloadFactory& workloadFactory,
1710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1711{
1712 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1713 workloadFactory, memoryManager, 2.f, 0);
1714}
1715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001716LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1717 armnn::IWorkloadFactory& workloadFactory,
1718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001719{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001720 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1721 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001722}
1723
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001724LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1725 armnn::IWorkloadFactory& workloadFactory,
1726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001727{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001728 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1729 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001730}
1731
Sadik Armagan2999a022019-04-09 14:20:12 +01001732LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1733 armnn::IWorkloadFactory& workloadFactory,
1734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1735{
1736 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1737 workloadFactory, memoryManager, 0.1333333f, 0);
1738}
1739
// Cross-backend comparison test: runs identical Addition workloads with the
// same random inputs on 'workloadFactory' (backend under test) and
// 'refWorkloadFactory' (reference backend). The tested backend's result is
// stored in ret.output and the reference result in ret.outputExpected, so
// the caller can compare the two implementations.
// NOTE(review): memoryManager is unused here — kept for a uniform test signature.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Random inputs with fixed seeds so the test is deterministic.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Reference descriptor reuses the same layout but points at the
    // reference factory's tensor handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1809
namespace {
// Shared implementation for all Division tests: builds two input tensors
// (each with its own shape and quantization parameters), runs a Division
// workload, and returns the actual output next to the caller-supplied
// expected values. Shapes may differ to exercise broadcasting.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters; for float
    // instantiations these values are simply stored and have no effect on
    // the raw data supplied by the caller.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Allocate before copying data in; configure before executing.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1874
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001875LayerTestResult<float,4> DivisionByZeroTest(
1876 armnn::IWorkloadFactory& workloadFactory,
1877 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001878{
1879 const unsigned int width = 2;
1880 const unsigned int height = 2;
1881 const unsigned int channelCount = 2;
1882 const unsigned int batchSize = 2;
1883
1884 unsigned int shape[] = { batchSize, channelCount, height, width };
1885
1886 std::vector<float> input0({
1887 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1888 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1889
1890 std::vector<float> input1({
1891 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1892 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1893
1894 std::vector<float> output({
1895 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1896 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1897
Sadik Armagan2999a022019-04-09 14:20:12 +01001898 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1899 memoryManager,
1900 shape, input0, 1.0f, 0,
1901 shape, input1, 1.0f, 0,
1902 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001903}
1904
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001905LayerTestResult<float,4> DivisionTest(
1906 armnn::IWorkloadFactory& workloadFactory,
1907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001908{
1909 const unsigned int width = 2;
1910 const unsigned int height = 2;
1911 const unsigned int channelCount = 2;
1912 const unsigned int batchSize = 2;
1913
1914 unsigned int shape[] = { batchSize, channelCount, height, width };
1915
1916 std::vector<float> input0({
1917 2, 2, 2, 2, 3, 3, 3, 3,
1918 4, 4, 4, 4, 5, 5, 5, 5 });
1919
1920 std::vector<float> input1({
1921 1, 1, 1, 1, 2, 2, 2, 2,
1922 4, 4, 4, 4, 4, 4, 4, 4 });
1923
1924 std::vector<float> output({
1925 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1926 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1927
David Beck5cd01f32018-09-12 16:00:08 +01001928
Sadik Armagan2999a022019-04-09 14:20:12 +01001929 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1930 memoryManager,
1931 shape, input0, 1.0f, 0,
1932 shape, input1, 1.0f, 0,
1933 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001934}
1935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001936LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1937 armnn::IWorkloadFactory& workloadFactory,
1938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001939{
1940 unsigned int shape0[] = { 1, 2, 2, 2 };
1941 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1942
1943 unsigned int shape1[] = { 1, 1, 1, 1 };
1944 std::vector<float> input1({ 2 });
1945
1946 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1947
David Beck5cd01f32018-09-12 16:00:08 +01001948
Sadik Armagan2999a022019-04-09 14:20:12 +01001949 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1950 memoryManager,
1951 shape0, input0, 1.0f, 0,
1952 shape1, input1, 1.0f, 0,
1953 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001954}
1955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001956LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1957 armnn::IWorkloadFactory& workloadFactory,
1958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001959{
1960 unsigned int shape0[] = { 1, 3, 3, 2 };
1961 std::vector<float> input0({
1962 1, 4, 3, 8, 5, 12,
1963 7, 16, 9, 20, 11, 24,
1964 13, 28, 15, 32, 17, 36});
1965
1966 unsigned int shape1[] = { 1, 1, 1, 2 };
1967 std::vector<float> input1({ 1, 2 });
1968
1969 std::vector<float> output({
1970 1, 2, 3, 4, 5, 6,
1971 7, 8, 9, 10, 11, 12,
1972 13, 14, 15, 16, 17, 18});
1973
Sadik Armagan2999a022019-04-09 14:20:12 +01001974 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1975 memoryManager,
1976 shape0, input0, 1.0f, 0,
1977 shape1, input1, 1.0f, 0,
1978 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001979}
1980
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001981LayerTestResult<uint8_t,4> DivisionUint8Test(
1982 armnn::IWorkloadFactory& workloadFactory,
1983 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001984{
1985 const unsigned int width = 2;
1986 const unsigned int height = 2;
1987 const unsigned int channelCount = 2;
1988 const unsigned int batchSize = 2;
1989
1990 unsigned int shape[] = { batchSize, channelCount, height, width };
1991
1992 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1993 4, 4, 4, 4, 5, 5, 5, 5 });
1994
1995 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1996 4, 4, 4, 4, 4, 4, 4, 4 });
1997
1998 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1999 4, 4, 4, 4, 5, 5, 5, 5});
2000
2001
Sadik Armagan2999a022019-04-09 14:20:12 +01002002 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2003 memoryManager,
2004 shape, input0, 1.0f, 0,
2005 shape, input1, 1.0f, 0,
2006 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002007}
2008
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002009LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2010 armnn::IWorkloadFactory& workloadFactory,
2011 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002012{
2013 unsigned int shape0[] = { 1, 2, 2, 2 };
2014 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2015
2016 unsigned int shape1[] = { 1, 1, 1, 1 };
2017 std::vector<uint8_t> input1({ 2 });
2018
2019 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2020
Sadik Armagan2999a022019-04-09 14:20:12 +01002021 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2022 memoryManager,
2023 shape0, input0, 1.0f, 0,
2024 shape1, input1, 1.0f, 0,
2025 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002026}
2027
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002028LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2029 armnn::IWorkloadFactory& workloadFactory,
2030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002031{
2032 unsigned int shape0[] = { 1, 3, 3, 2 };
2033 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2034 7, 16, 9, 20, 11, 24,
2035 13, 28, 15, 32, 17, 36});
2036
2037 unsigned int shape1[] = { 1, 1, 1, 2 };
2038 std::vector<uint8_t> input1({ 1, 2 });
2039
2040 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2041 7, 8, 9, 10, 11, 12,
2042 13, 14, 15, 16, 17, 18});
2043
Sadik Armagan2999a022019-04-09 14:20:12 +01002044 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2045 memoryManager,
2046 shape0, input0, 1.0f, 0,
2047 shape1, input1, 1.0f, 0,
2048 shape0, output, 1.0f, 0);
2049}
2050
2051LayerTestResult<int16_t,4> DivisionInt16Test(
2052 armnn::IWorkloadFactory& workloadFactory,
2053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2054{
2055 unsigned int shape[] = { 2, 2, 2, 2 };
2056
2057 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2058 4, 4, 4, 4, 5, 5, 5, 5 });
2059
2060 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2061 4, 4, 4, 4, 4, 4, 4, 4 });
2062
2063 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2064 4, 4, 4, 4, 5, 5, 5, 5});
2065
2066
2067 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2068 memoryManager,
2069 shape, input0, 1.0f, 0,
2070 shape, input1, 1.0f, 0,
2071 shape, output, 0.25f, 0);
2072}
2073
2074LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2075 armnn::IWorkloadFactory& workloadFactory,
2076 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2077{
2078 unsigned int shape0[] = { 1, 2, 2, 2 };
2079 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2080
2081 unsigned int shape1[] = { 1, 1, 1, 1 };
2082 std::vector<int16_t> input1({ 2 });
2083
2084 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2085
2086 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2087 memoryManager,
2088 shape0, input0, 1.0f, 0,
2089 shape1, input1, 1.0f, 0,
2090 shape0, output, 1.0f, 0);
2091}
2092
2093LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2094 armnn::IWorkloadFactory& workloadFactory,
2095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2096{
2097 unsigned int shape0[] = { 1, 3, 3, 2 };
2098 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2099 7, 16, 9, 20, 11, 24,
2100 13, 28, 15, 32, 17, 36});
2101
2102 unsigned int shape1[] = { 1, 1, 1, 2 };
2103 std::vector<int16_t> input1({ 1, 2 });
2104
2105 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2106 7, 8, 9, 10, 11, 12,
2107 13, 14, 15, 16, 17, 18});
2108
2109 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2110 memoryManager,
2111 shape0, input0, 1.0f, 0,
2112 shape1, input1, 1.0f, 0,
2113 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002114}
2115
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002116template<typename DescriptorType>
2117std::unique_ptr<armnn::IWorkload> CreateWorkload(
2118 const armnn::IWorkloadFactory& workloadFactory,
2119 const armnn::WorkloadInfo& info,
2120 const DescriptorType& descriptor)
2121{
2122 return CreateWorkload(workloadFactory, info, descriptor);
2123};
2124
2125template<>
2126std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2127 const armnn::IWorkloadFactory& workloadFactory,
2128 const armnn::WorkloadInfo& info,
2129 const armnn::MaximumQueueDescriptor& descriptor)
2130{
2131 return workloadFactory.CreateMaximum(descriptor, info);
2132}
2133
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002134template<>
2135std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2136 const armnn::IWorkloadFactory& workloadFactory,
2137 const armnn::WorkloadInfo& info,
2138 const armnn::MinimumQueueDescriptor& descriptor)
2139{
2140 return workloadFactory.CreateMinimum(descriptor, info);
2141}
2142
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002143template<>
2144std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2145 const armnn::IWorkloadFactory& workloadFactory,
2146 const armnn::WorkloadInfo& info,
2147 const armnn::EqualQueueDescriptor& descriptor)
2148{
2149 return workloadFactory.CreateEqual(descriptor, info);
2150}
2151
FrancisMurtagh878f0232018-12-19 10:56:15 +00002152template<>
2153std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2154 const armnn::IWorkloadFactory& workloadFactory,
2155 const armnn::WorkloadInfo& info,
2156 const armnn::GreaterQueueDescriptor& descriptor)
2157{
2158 return workloadFactory.CreateGreater(descriptor, info);
2159}
2160
namespace {

// Drives a binary element-wise workload (Equal/Greater/Maximum/Minimum, ...)
// end to end: builds the two input tensors and the expected-output tensor,
// creates the workload through CreateWorkload<Descriptor>, executes it, and
// returns a LayerTestResult holding both actual and expected output.
//
// ArmnnTypeInput/ArmnnTypeOutput may differ, which allows comparison ops
// that consume e.g. Float32 and produce Boolean.
// qScale/qOffset are applied to BOTH inputs and the output, but only when
// TInput is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    // Input tensors are built from the raw values before any quantization
    // parameters are attached to the infos below.
    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization parameters only apply to quantized element types; for
    // float inputs the default-constructed infos are used unchanged.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Comparison workloads produce Boolean tensors, which are compared with
    // a dedicated 0/1 comparison rather than numeric closeness.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire both inputs and the output into the queue descriptor, then let
    // the per-descriptor CreateWorkload specialization build the workload.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Handles must be allocated before any data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before execution.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose input and output element types are
// the same (e.g. Maximum/Minimum); forwards to the two-type helper above.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
2252
2253LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002255{
2256 const unsigned int width = 2;
2257 const unsigned int height = 2;
2258 const unsigned int channelCount = 2;
2259 const unsigned int batchSize = 2;
2260
2261 unsigned int shape[] = { batchSize, channelCount, height, width };
2262
2263 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2264 3, 3, 3, 3, 4, 4, 4, 4 });
2265
2266 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2267 5, 5, 5, 5, 4, 4, 4, 4 });
2268
kevmay012b4d88e2019-01-24 14:05:09 +00002269 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2270 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002271
kevmay012b4d88e2019-01-24 14:05:09 +00002272 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002273 workloadFactory,
2274 memoryManager,
2275 shape,
2276 input0,
2277 shape,
2278 input1,
2279 shape,
2280 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002281}
2282
kevmay012b4d88e2019-01-24 14:05:09 +00002283LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002284 armnn::IWorkloadFactory& workloadFactory,
2285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2286{
2287 unsigned int shape0[] = { 1, 2, 2, 2 };
2288 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2289
2290 unsigned int shape1[] = { 1, 1, 1, 1 };
2291 std::vector<float> input1({ 1 });
2292
kevmay012b4d88e2019-01-24 14:05:09 +00002293 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002294
kevmay012b4d88e2019-01-24 14:05:09 +00002295 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002296 workloadFactory,
2297 memoryManager,
2298 shape0,
2299 input0,
2300 shape1,
2301 input1,
2302 shape0,
2303 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002304}
2305
kevmay012b4d88e2019-01-24 14:05:09 +00002306LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002307 armnn::IWorkloadFactory& workloadFactory,
2308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2309{
2310 const unsigned int shape0[] = { 1, 2, 2, 3 };
2311 const unsigned int shape1[] = { 1, 1, 1, 3 };
2312
2313 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2314 7, 8, 9, 10, 11, 12 });
2315
2316 std::vector<float> input1({ 1, 2, 3});
2317
kevmay012b4d88e2019-01-24 14:05:09 +00002318 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2319 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002320
kevmay012b4d88e2019-01-24 14:05:09 +00002321 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002322 workloadFactory,
2323 memoryManager,
2324 shape0,
2325 input0,
2326 shape1,
2327 input1,
2328 shape0,
2329 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002330}
2331
2332LayerTestResult<uint8_t, 4> EqualUint8Test(
2333 armnn::IWorkloadFactory& workloadFactory,
2334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2335{
2336 unsigned int shape[] = { 2, 2, 2, 2 };
2337
2338 // See dequantized values to the right.
2339 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002340 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002341
2342 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2343 3, 3, 3, 3, 5, 5, 5, 5 });
2344
2345 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2346 1, 1, 1, 1, 0, 0, 0, 0 });
2347
kevmay012b4d88e2019-01-24 14:05:09 +00002348 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2349 armnn::DataType::QuantisedAsymm8,
2350 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002351 workloadFactory,
2352 memoryManager,
2353 shape,
2354 input0,
2355 shape,
2356 input1,
2357 shape,
2358 output,
2359 1.0f,
2360 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002361}
2362
2363LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2364 armnn::IWorkloadFactory& workloadFactory,
2365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2366{
2367 const unsigned int shape0[] = { 1, 2, 2, 3 };
2368 const unsigned int shape1[] = { 1, 1, 1, 1 };
2369
2370 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2371 7, 8, 9, 10, 11, 12 });
2372
2373 std::vector<uint8_t> input1({ 1 });
2374
2375 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2376 0, 0, 0, 0, 0, 0 });
2377
kevmay012b4d88e2019-01-24 14:05:09 +00002378 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2379 armnn::DataType::QuantisedAsymm8,
2380 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002381 workloadFactory,
2382 memoryManager,
2383 shape0,
2384 input0,
2385 shape1,
2386 input1,
2387 shape0,
2388 output,
2389 1.0f,
2390 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002391}
2392
2393LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2394 armnn::IWorkloadFactory& workloadFactory,
2395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2396{
2397 const unsigned int shape0[] = { 1, 2, 2, 3 };
2398 const unsigned int shape1[] = { 1, 1, 1, 3 };
2399
2400 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2401 7, 8, 9, 10, 11, 12 });
2402
2403 std::vector<uint8_t> input1({ 1, 1, 3});
2404
2405 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2406 0, 0, 0, 0, 0, 0 });
2407
kevmay012b4d88e2019-01-24 14:05:09 +00002408 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2409 armnn::DataType::QuantisedAsymm8,
2410 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002411 workloadFactory,
2412 memoryManager,
2413 shape0,
2414 input0,
2415 shape1,
2416 input1,
2417 shape0,
2418 output,
2419 1.0f,
2420 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002421}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002422
kevmay012b4d88e2019-01-24 14:05:09 +00002423LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2425{
2426 const unsigned int width = 2;
2427 const unsigned int height = 2;
2428 const unsigned int channelCount = 2;
2429 const unsigned int batchSize = 2;
2430
2431 unsigned int shape[] = { batchSize, channelCount, height, width };
2432
2433 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2434 3, 3, 3, 3, 4, 4, 4, 4 });
2435
2436 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2437 5, 5, 5, 5, 4, 4, 4, 4 });
2438
kevmay012b4d88e2019-01-24 14:05:09 +00002439 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2440 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002441
kevmay012b4d88e2019-01-24 14:05:09 +00002442 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002443 workloadFactory,
2444 memoryManager,
2445 shape,
2446 input0,
2447 shape,
2448 input1,
2449 shape,
2450 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002451}
2452
kevmay012b4d88e2019-01-24 14:05:09 +00002453LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002454 armnn::IWorkloadFactory& workloadFactory,
2455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2456{
2457 unsigned int shape0[] = { 1, 2, 2, 2 };
2458 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2459
2460 unsigned int shape1[] = { 1, 1, 1, 1 };
2461 std::vector<float> input1({ 1 });
2462
kevmay012b4d88e2019-01-24 14:05:09 +00002463 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002464
kevmay012b4d88e2019-01-24 14:05:09 +00002465 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002466 workloadFactory,
2467 memoryManager,
2468 shape0,
2469 input0,
2470 shape1,
2471 input1,
2472 shape0,
2473 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002474}
2475
kevmay012b4d88e2019-01-24 14:05:09 +00002476LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002477 armnn::IWorkloadFactory& workloadFactory,
2478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2479{
2480 const unsigned int shape0[] = { 1, 2, 2, 3 };
2481 const unsigned int shape1[] = { 1, 1, 1, 3 };
2482
2483 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2484 7, 8, 9, 10, 11, 12 });
2485
2486 std::vector<float> input1({ 1, 3, 2});
2487
kevmay012b4d88e2019-01-24 14:05:09 +00002488 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2489 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002490
kevmay012b4d88e2019-01-24 14:05:09 +00002491 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002492 workloadFactory,
2493 memoryManager,
2494 shape0,
2495 input0,
2496 shape1,
2497 input1,
2498 shape0,
2499 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002500}
2501
2502LayerTestResult<uint8_t, 4> GreaterUint8Test(
2503 armnn::IWorkloadFactory& workloadFactory,
2504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2505{
2506 unsigned int shape[] = { 2, 2, 2, 2 };
2507
2508 // See dequantized values to the right.
2509 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2510 3, 3, 3, 3, 5, 5, 5, 5 });
2511
2512 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2513 2, 2, 2, 2, 5, 5, 5, 5 });
2514
2515 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2516 1, 1, 1, 1, 0, 0, 0, 0 });
2517
kevmay012b4d88e2019-01-24 14:05:09 +00002518 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2519 armnn::DataType::QuantisedAsymm8,
2520 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002521 workloadFactory,
2522 memoryManager,
2523 shape,
2524 input0,
2525 shape,
2526 input1,
2527 shape,
2528 output,
2529 1.0f,
2530 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002531}
2532
2533LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2534 armnn::IWorkloadFactory& workloadFactory,
2535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2536{
2537 const unsigned int shape0[] = { 1, 2, 2, 3 };
2538 const unsigned int shape1[] = { 1, 1, 1, 1 };
2539
2540 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2541 7, 8, 9, 10, 11, 12 });
2542
2543 std::vector<uint8_t> input1({ 1 });
2544
2545 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2546 1, 1, 1, 1, 1, 1 });
2547
kevmay012b4d88e2019-01-24 14:05:09 +00002548 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2549 armnn::DataType::QuantisedAsymm8,
2550 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002551 workloadFactory,
2552 memoryManager,
2553 shape0,
2554 input0,
2555 shape1,
2556 input1,
2557 shape0,
2558 output,
2559 1.0f,
2560 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002561}
2562
2563LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2564 armnn::IWorkloadFactory& workloadFactory,
2565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2566{
2567 const unsigned int shape0[] = { 1, 2, 2, 3 };
2568 const unsigned int shape1[] = { 1, 1, 1, 3 };
2569
2570 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2571 7, 8, 9, 10, 11, 12 });
2572
2573 std::vector<uint8_t> input1({ 1, 1, 3});
2574
2575 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2576 1, 1, 1, 1, 1, 1 });
2577
kevmay012b4d88e2019-01-24 14:05:09 +00002578 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2579 armnn::DataType::QuantisedAsymm8,
2580 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002581 workloadFactory,
2582 memoryManager,
2583 shape0,
2584 input0,
2585 shape1,
2586 input1,
2587 shape0,
2588 output,
2589 1.0f,
2590 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002591}
2592
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002593LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2595{
2596 const unsigned int width = 2;
2597 const unsigned int height = 2;
2598 const unsigned int channelCount = 2;
2599 const unsigned int batchSize = 2;
2600
2601 unsigned int shape[] = { batchSize, channelCount, height, width };
2602
2603 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2604 3, 3, 3, 3, 4, 4, 4, 4 });
2605
2606 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2607 4, 4, 4, 4, 5, 5, 5, 5 });
2608
2609 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2610 4, 4, 4, 4, 5, 5, 5, 5 });
2611
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002612 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2613 workloadFactory,
2614 memoryManager,
2615 shape,
2616 input0,
2617 shape,
2618 input1,
2619 shape,
2620 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002621}
2622
2623LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2624 armnn::IWorkloadFactory& workloadFactory,
2625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2626{
2627 unsigned int shape0[] = { 1, 2, 2, 2 };
2628 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2629
2630 unsigned int shape1[] = { 1, 1, 1, 1 };
2631 std::vector<float> input1({ 2 });
2632
2633 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2634
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002635 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2636 workloadFactory,
2637 memoryManager,
2638 shape0,
2639 input0,
2640 shape1,
2641 input1,
2642 shape0,
2643 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002644}
2645
2646LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2647 armnn::IWorkloadFactory& workloadFactory,
2648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2649{
2650 const unsigned int shape0[] = { 1, 2, 2, 3 };
2651 const unsigned int shape1[] = { 1, 1, 1, 3 };
2652
2653 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2654 7, 8, 9, 10, 11, 12 });
2655
2656 std::vector<float> input1({ 1, 2, 3});
2657
2658 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002659 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002661 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2662 workloadFactory,
2663 memoryManager,
2664 shape0,
2665 input0,
2666 shape1,
2667 input1,
2668 shape0,
2669 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002670}
2671
2672LayerTestResult<uint8_t, 4> MaximumUint8Test(
2673 armnn::IWorkloadFactory& workloadFactory,
2674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2675{
2676 unsigned int shape[] = { 2, 2, 2, 2 };
2677
2678 // See dequantized values to the right.
2679 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2680 3, 3, 3, 3, 4, 4, 4, 4 });
2681
2682 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2683 4, 4, 4, 4, 5, 5, 5, 5 });
2684
2685 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2686 4, 4, 4, 4, 5, 5, 5, 5 });
2687
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002688 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2689 workloadFactory,
2690 memoryManager,
2691 shape,
2692 input0,
2693 shape,
2694 input1,
2695 shape,
2696 output,
2697 1.0f,
2698 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002699}
2700
2701LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2702 armnn::IWorkloadFactory& workloadFactory,
2703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2704{
2705 const unsigned int shape0[] = { 1, 2, 2, 3 };
2706 const unsigned int shape1[] = { 1, 1, 1, 1 };
2707
2708 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2709 7, 8, 9, 10, 11, 12 });
2710
2711 std::vector<uint8_t> input1({2});
2712
2713 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2714 7, 8, 9, 10, 11, 12 });
2715
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002716 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2717 workloadFactory,
2718 memoryManager,
2719 shape0,
2720 input0,
2721 shape1,
2722 input1,
2723 shape0,
2724 output,
2725 1.0f,
2726 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002727}
2728
2729LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2730 armnn::IWorkloadFactory& workloadFactory,
2731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2732{
2733 const unsigned int shape0[] = { 1, 2, 2, 3 };
2734 const unsigned int shape1[] = { 1, 1, 1, 3 };
2735
2736 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2737 7, 8, 9, 10, 11, 12 });
2738
2739 std::vector<uint8_t> input1({ 1, 10, 3});
2740
2741 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2742 7, 10, 9, 10, 11, 12 });
2743
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002744 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2745 workloadFactory,
2746 memoryManager,
2747 shape0,
2748 input0,
2749 shape1,
2750 input1,
2751 shape0,
2752 output,
2753 1.0f,
2754 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002755}
2756
Sadik Armagan2999a022019-04-09 14:20:12 +01002757LayerTestResult<int16_t, 4> MaximumInt16Test(
2758 armnn::IWorkloadFactory& workloadFactory,
2759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2760{
2761 unsigned int shape[] = { 2, 2, 2, 2 };
2762
2763 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2764 3, 3, 3, 3, 4, 4, 4, 4 });
2765
2766 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2767 4, 4, 4, 4, 5, 5, 5, 5 });
2768
2769 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2770 4, 4, 4, 4, 5, 5, 5, 5 });
2771
2772 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2773 workloadFactory,
2774 memoryManager,
2775 shape,
2776 input0,
2777 shape,
2778 input1,
2779 shape,
2780 output,
2781 1.0f,
2782 0);
2783}
2784
2785LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2786 armnn::IWorkloadFactory& workloadFactory,
2787 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2788{
2789 const unsigned int shape0[] = { 1, 2, 2, 3 };
2790 const unsigned int shape1[] = { 1, 1, 1, 1 };
2791
2792 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2793 7, 8, 9, 10, 11, 12 });
2794
2795 std::vector<int16_t> input1({2});
2796
2797 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2798 7, 8, 9, 10, 11, 12 });
2799
2800 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2801 workloadFactory,
2802 memoryManager,
2803 shape0,
2804 input0,
2805 shape1,
2806 input1,
2807 shape0,
2808 output,
2809 1.0f,
2810 0);
2811}
2812
2813LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2814 armnn::IWorkloadFactory& workloadFactory,
2815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2816{
2817 const unsigned int shape0[] = { 1, 2, 2, 3 };
2818 const unsigned int shape1[] = { 1, 1, 1, 3 };
2819
2820 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2821 7, 8, 9, 10, 11, 12 });
2822
2823 std::vector<int16_t> input1({ 1, 10, 3});
2824
2825 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2826 7, 10, 9, 10, 11, 12 });
2827
2828 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2829 workloadFactory,
2830 memoryManager,
2831 shape0,
2832 input0,
2833 shape1,
2834 input1,
2835 shape0,
2836 output,
2837 1.0f,
2838 0);
2839}
2840
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002841LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2842 armnn::IWorkloadFactory& workloadFactory,
2843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2844{
2845 unsigned int shape0[] = { 1, 2, 2, 2 };
2846 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2847
2848 unsigned int shape1[] = { 1, 1, 1, 1 };
2849 std::vector<float> input1({ 2 });
2850
2851 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2852
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002853 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2854 workloadFactory,
2855 memoryManager,
2856 shape0,
2857 input0,
2858 shape1,
2859 input1,
2860 shape0,
2861 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002862}
2863
2864
2865LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2866 armnn::IWorkloadFactory& workloadFactory,
2867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2868{
2869 unsigned int shape0[] = { 1, 2, 2, 2 };
2870 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2871
2872 unsigned int shape1[] = { 1, 1, 1, 1 };
2873 std::vector<float> input1({ 5 });
2874
2875 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2876
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002877 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2878 workloadFactory,
2879 memoryManager,
2880 shape0,
2881 input0,
2882 shape1,
2883 input1,
2884 shape0,
2885 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002886}
2887
2888LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2889 armnn::IWorkloadFactory & workloadFactory,
2890 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2891{
2892 const unsigned int shape0[] = { 1, 2, 2, 3 };
2893 const unsigned int shape1[] = { 1, 1, 1, 3 };
2894
2895 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2896 7, 1, 2, 3, 4, 5 });
2897
2898 std::vector<uint8_t> input1({ 1, 2, 3});
2899
2900 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2901 1, 1, 2, 1, 2, 3 });
2902
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002903 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2904 workloadFactory,
2905 memoryManager,
2906 shape0,
2907 input0,
2908 shape1,
2909 input1,
2910 shape0,
2911 output,
2912 1.0f,
2913 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002914}
2915
Sadik Armagan2999a022019-04-09 14:20:12 +01002916LayerTestResult<int16_t, 4> MinimumInt16Test(
2917 armnn::IWorkloadFactory& workloadFactory,
2918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2919{
2920 unsigned int shape[] = { 2, 2, 2, 2 };
2921
2922 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2923 3, 3, 3, 3, 4, 4, 4, 4 });
2924
2925 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2926 4, 4, 4, 4, 5, 5, 5, 5 });
2927
2928 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2929 3, 3, 3, 3, 4, 4, 4, 4 });
2930
2931 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2932 workloadFactory,
2933 memoryManager,
2934 shape,
2935 input0,
2936 shape,
2937 input1,
2938 shape,
2939 output,
2940 1.0f,
2941 0);
2942}
2943
2944LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2945 armnn::IWorkloadFactory& workloadFactory,
2946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2947{
2948 const unsigned int shape0[] = { 1, 2, 2, 3 };
2949 const unsigned int shape1[] = { 1, 1, 1, 1 };
2950
2951 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2952 7, 8, 9, 10, 11, 12 });
2953
2954 std::vector<int16_t> input1({2});
2955
2956 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2957 2, 2, 2, 2, 2, 2 });
2958
2959 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2960 workloadFactory,
2961 memoryManager,
2962 shape0,
2963 input0,
2964 shape1,
2965 input1,
2966 shape0,
2967 output,
2968 1.0f,
2969 0);
2970}
2971
2972LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2973 armnn::IWorkloadFactory& workloadFactory,
2974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2975{
2976 const unsigned int shape0[] = { 1, 2, 2, 3 };
2977 const unsigned int shape1[] = { 1, 1, 1, 3 };
2978
2979 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2980 7, 8, 9, 10, 11, 12 });
2981
2982 std::vector<int16_t> input1({ 1, 10, 3});
2983
2984 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2985 1, 8, 3, 1, 10, 3 });
2986
2987 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2988 workloadFactory,
2989 memoryManager,
2990 shape0,
2991 input0,
2992 shape1,
2993 input1,
2994 shape0,
2995 output,
2996 1.0f,
2997 0);
2998}
2999
namespace {
// Runs a Multiplication workload over two float32 input tensors and compares
// the result against the caller-supplied expected values.
//
// shape0/values0 and shape1/values1 describe the two (4D) inputs; outShape/
// outValues describe the expected output. The memoryManager parameter is
// accepted for signature uniformity with the other test helpers but is not
// referenced in this body.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Create backend tensor handles for both inputs and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the handles into the workload descriptor before creating the workload.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate before copying data in; configure after allocation, before execute.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
3049
3050
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003051LayerTestResult<float,4> MultiplicationTest(
3052 armnn::IWorkloadFactory& workloadFactory,
3053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003054{
3055 const unsigned int width = 2;
3056 const unsigned int height = 2;
3057 const unsigned int channelCount = 2;
3058 const unsigned int batchSize = 2;
3059
3060 unsigned int shape[] = { batchSize, channelCount, height, width };
3061
3062 std::vector<float> input0({
3063 1, 1, 1, 1, 2, 2, 2, 2,
3064 3, 3, 3, 3, 4, 4, 4, 4 });
3065
3066 std::vector<float> input1({
3067 2, 2, 2, 2, 3, 3, 3, 3,
3068 4, 4, 4, 4, 5, 5, 5, 5 });
3069
3070 std::vector<float> output({
3071 2, 2, 2, 2, 6, 6, 6, 6,
3072 12, 12, 12, 12, 20, 20, 20, 20 });
3073
3074 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003075 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003076 shape,
3077 input0,
3078 shape,
3079 input1,
3080 shape,
3081 output);
3082}
3083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003084LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3085 armnn::IWorkloadFactory& workloadFactory,
3086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003087{
3088 unsigned int shape0[] = { 1, 2, 2, 2 };
3089 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3090
3091 unsigned int shape1[] = { 1, 1, 1, 1 };
3092 std::vector<float> input1({ 2 });
3093
3094 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3095
3096 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003097 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003098 shape0,
3099 input0,
3100 shape1,
3101 input1,
3102 shape0,
3103 output);
3104}
3105
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003106LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3107 armnn::IWorkloadFactory& workloadFactory,
3108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003109{
3110 unsigned int shape0[] = { 1, 3, 3, 2 };
3111 std::vector<float> input0({
3112 1, 2, 3, 4, 5, 6,
3113 7, 8, 9, 10, 11, 12,
3114 13, 14, 15, 16, 17, 18});
3115
3116 unsigned int shape1[] = { 1, 1, 1, 2 };
3117 std::vector<float> input1({ 1, 2 });
3118
3119 std::vector<float> output({
3120 1, 4, 3, 8, 5, 12,
3121 7, 16, 9, 20, 11, 24,
3122 13, 28, 15, 32, 17, 36});
3123
3124 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003125 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003126 shape0,
3127 input0,
3128 shape1,
3129 input1,
3130 shape0,
3131 output);
3132}
telsoa014fcda012018-03-09 14:13:49 +00003133
// Runs the same Multiplication workload on two workload factories (the one
// under test and a reference one) with identical random inputs, and returns
// a LayerTestResult whose 'output' holds the tested backend's result and
// whose 'outputExpected' holds the reference backend's result, so the caller
// can compare them. The memoryManager parameter is accepted for signature
// uniformity but is not referenced in this body.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor reuses the same slots but rebinds them to the
    // reference backend's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3203
// Runs the same BatchNormalization workload on the backend under test and on
// a reference backend with identical random inputs/parameters, returning the
// two results in 'output' / 'outputExpected' for the caller to compare.
// The memoryManager parameter is accepted for signature uniformity but is
// not referenced in this body.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // Per-channel parameter tensors (mean/variance/beta/gamma) are 1D of size 'channels'.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the random data reproducible across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // Extra 0.0f argument — presumably a lower bound so variance is non-negative;
    // TODO(review): confirm against MakeRandomTensor's signature.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    // Parameter tensors are held in ScopedCpuTensorHandles shared (by pointer)
    // between the tested and reference descriptors.
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor rebinds input/output slots to the reference handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3286
// Runs a Permute workload that rearranges 'inputData' according to 'mappings'
// and writes the permuted values into 'outputData' (resized to fit).
// NOTE: inputTensorInfo is an in/out parameter — on return it is overwritten
// with the permuted tensor info, so the caller sees the new shape.
// The memoryManager parameter is accepted for signature uniformity but is
// not referenced in this body.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Derive the output tensor info by applying the permutation to the input info.
    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted info back to the caller.
    inputTensorInfo = outputTensorInfo;
}
3330
Jim Flynn825af452019-05-20 12:49:28 +01003331armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003332 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3333 unsigned int concatDim)
3334{
telsoa014fcda012018-03-09 14:13:49 +00003335 std::vector<armnn::TensorShape> shapes;
3336 shapes.reserve(inputTensorInfos.size());
3337 for (const armnn::TensorInfo& it: inputTensorInfos)
3338 {
3339 shapes.push_back(it.GetShape());
3340 }
surmeh013537c2c2018-05-18 16:31:43 +01003341
Jim Flynn825af452019-05-20 12:49:28 +01003342 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3343 shapes.end(),
3344 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003345}
3346
//
// Concatenation is only supported for N and C dimensions for NCHW and the innermost dimension.
// In the case of fewer than 4 dimensions we need to make sure that the concat dimension is at
// least the 3rd slowest iterating one or the innermost dimension.
//
3352
// Returns true when the inputs must be permuted before concatenation can be
// performed (i.e. the requested concat axis is not directly supported).
// By the expression below: always true for rank < 3; for rank 3 only when
// concatDim == 1; never for rank 4.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
3378
3379armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3380{
3381 unsigned int numDims = inputShape.GetNumDimensions();
3382 if (numDims >= 3)
3383 {
3384 // Nothing to do if the inputShape has at least 3 dimensions.
3385 return inputShape;
3386 }
3387
3388 std::vector<unsigned int> newDims(size_t(3), 1u);
3389 unsigned int expandedBy = 3 - numDims;
3390 for (unsigned int i=0; i<numDims; ++i)
3391 {
3392 newDims[expandedBy+i] = inputShape[i];
3393 }
3394 return armnn::TensorShape(3u, &newDims[0]);
3395}
3396
3397void Generate3dPermuteVectorForConcat(
3398 unsigned int numDimensions,
3399 unsigned int & concatDim,
3400 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3401{
3402 BOOST_ASSERT_MSG(numDimensions <= 3,
3403 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003404 unsigned int expandedBy = 3 - numDimensions;
3405 unsigned int expandedConcatAxis = concatDim + expandedBy;
3406
3407 if (expandedConcatAxis == 2)
3408 {
3409 concatDim = 0;
3410 armnn::PermutationVector forwardPermutation({1, 2, 0});
3411 armnn::PermutationVector reversePermutation({2, 0, 1});
3412 permutations = std::make_pair(forwardPermutation, reversePermutation);
3413 }
3414 else if (expandedConcatAxis == 1)
3415 {
3416 concatDim = 0;
3417 armnn::PermutationVector forwardPermutation({2, 0, 1});
3418 armnn::PermutationVector reversePermutation({1, 2, 0});
3419 permutations = std::make_pair(forwardPermutation, reversePermutation);
3420 }
3421 else
3422 {
3423 BOOST_ASSERT(expandedConcatAxis == 0);
3424 concatDim = 0;
3425 }
3426}
3427
3428//
3429// Permute the input tensors so we can do a supported concatenation.
3430// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3431// at the front. Finally this function tells what the output shape
3432// of the permuted concatenated tensor is going to be.
3433//
// Permutes every input tensor (data + info) so that the requested concat axis
// becomes a supported one. All of inputTensorInfos, inputData, permuteVector,
// concatDim and outputTensorInfo are updated in place:
//  - inputTensorInfos / inputData are replaced by their permuted equivalents
//    (permuted data is owned by inputDataStorage),
//  - permuteVector receives the REVERSE permutation for undoing the transform
//    on the output (see PermuteOutputForConcat),
//  - concatDim is remapped by Generate3dPermuteVectorForConcat,
//  - outputTensorInfo's shape is updated to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the rank and hence the permutation pair;
            // subsequent inputs must match it.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D before permuting, then run the forward permutation.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3496
3497
3498//
3499// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003500// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003501// output.
3502//
// Counterpart of PermuteInputsForConcat: reads the concatenated result from
// 'inputDataHandle', applies 'permuteVector' (the reverse permutation stored
// by PermuteInputsForConcat) and copies the restored-layout values into the
// caller's 'data' buffer so it can be compared against the expected output.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the raw concatenated result out of the backend handle first.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
3536
3537template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003538void Concatenate(
3539 armnn::IWorkloadFactory& workloadFactory,
3540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3541 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3542 std::initializer_list<T *> inputsOrig,
3543 const armnn::TensorInfo& outputTensorInfoOrig,
3544 T * output,
narpra015cdda352018-11-19 15:30:27 +00003545 unsigned int concatDim,
3546 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003547{
3548 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3549 if (output == nullptr)
3550 {
3551 // Nullptr is an error in the test. By returning without doing the permutation
3552 // I expect the caller to fail the test. It still makes sense to report this as
3553 // an assert for Debug builds.
3554 return;
3555 }
3556
telsoa01c577f2c2018-08-31 09:22:23 +01003557 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003558 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3559 std::vector<T *> inputs = inputsOrig;
3560 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3561
3562 armnn::PermutationVector permuteVector{0, 1, 2};
3563
telsoa01c577f2c2018-08-31 09:22:23 +01003564 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003565 std::vector<std::vector<T>> tmpInputDataStorage;
3566
3567 const size_t inputCount = inputTensorInfos.size();
3568
3569 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3570
3571 if (needPermuteForConcat)
3572 {
3573 //
3574 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003575 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003576 //
3577 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003578 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003579 inputTensorInfos,
3580 inputs,
3581 tmpInputDataStorage,
3582 permuteVector,
3583 concatDim,
3584 outputTensorInfo);
3585 }
3586
narpra015cdda352018-11-19 15:30:27 +00003587 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003588
3589 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3590 inputHandles.reserve(inputCount);
3591
narpra015cdda352018-11-19 15:30:27 +00003592 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3593
Jim Flynne242f2d2019-05-22 14:24:13 +01003594 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01003595 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00003596 queueDescriptor.m_Parameters = viewsDescriptor;
3597
3598 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003599 {
narpra015cdda352018-11-19 15:30:27 +00003600 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3601 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3602 {
3603 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3604 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3605 }
telsoa014fcda012018-03-09 14:13:49 +00003606
narpra015cdda352018-11-19 15:30:27 +00003607 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003608
narpra015cdda352018-11-19 15:30:27 +00003609 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3610 for (unsigned int i = 0; i < inputCount; ++i)
3611 {
3612 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3613 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3614 subTensorsSupported ?
3615 workloadFactory.CreateSubTensorHandle(*outputHandle,
3616 inputTensorInfo.GetShape(),
3617 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3618 workloadFactory.CreateTensorHandle(inputTensorInfo);
3619
3620 inputHandles.emplace_back(std::move(inputHandle));
3621 }
3622
telsoa014fcda012018-03-09 14:13:49 +00003623 }
narpra015cdda352018-11-19 15:30:27 +00003624 else
3625 {
3626 for (unsigned int i = 0; i < inputCount; ++i)
3627 {
3628 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3629 inputHandles.emplace_back(std::move(inputHandle));
3630 }
3631 }
telsoa014fcda012018-03-09 14:13:49 +00003632
3633 for (unsigned int i = 0; i < inputCount; ++i)
3634 {
surmeh013537c2c2018-05-18 16:31:43 +01003635 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003636 }
3637
3638 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3639
Jim Flynn4ed6c832019-05-20 11:02:46 +01003640 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00003641
3642 for (auto& inputHandle : inputHandles)
3643 {
3644 inputHandle->Allocate();
3645 }
3646
3647 outputHandle->Allocate();
3648
3649 unsigned int nextInputId = 0;
3650 for (auto& inputHandle : inputHandles)
3651 {
surmeh013537c2c2018-05-18 16:31:43 +01003652 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3653 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003654 }
3655
Derek Lambertif30f7d32019-04-09 10:25:02 +01003656 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003657 workload->Execute();
3658
surmeh013537c2c2018-05-18 16:31:43 +01003659 if (needPermuteForConcat)
3660 {
3661 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003662 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003663 outputTensorInfo,
3664 permuteVector,
3665 std::move(outputHandle),
3666 output);
3667 }
3668 else
3669 {
3670 CopyDataFromITensorHandle(output, outputHandle.get());
3671 }
telsoa014fcda012018-03-09 14:13:49 +00003672}
3673
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003674template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003675LayerTestResult<T, 1> Concatenation1dTestImpl(
3676 armnn::IWorkloadFactory& workloadFactory,
3677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3678 float qScale,
3679 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003680{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003681 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003682
3683 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3684 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3685 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3686
Jim Flynncbb66aa2019-05-15 13:03:54 +01003687 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003688
3689 LayerTestResult<T, 1> result(outputTensorInfo);
3690
3691 std::vector<T> output;
3692 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003693 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003694 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3695 { input0.data(), input1.data(), input2.data() },
3696 outputTensorInfo,
3697 output.data(),
3698 0,
3699 true);
telsoa014fcda012018-03-09 14:13:49 +00003700
3701 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3702 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3703 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3704 }));
3705
3706 return result;
3707}
3708
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003709LayerTestResult<float, 1> Concatenation1dTest(
3710 armnn::IWorkloadFactory& workloadFactory,
3711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003712{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003713 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003714}
3715
// Shared helper for the 2D concatenation tests: concatenates three fixed
// [2, 3] inputs along 'dimension' into the caller-supplied output shape.
// The caller fills in result.outputExpected for its chosen dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    // All three inputs share the same [batch, element] = [2, 3] shape.
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Sub-tensors are always requested here; the helper falls back to plain
    // tensor handles if the workload factory does not support them.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
3766
// Concatenation of three [2, 3] tensors along dimension 0, producing a
// [6, 3] output: the inputs' rows are stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected output: input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003802LayerTestResult<float, 2> Concatenation2dDim0Test(
3803 armnn::IWorkloadFactory& workloadFactory,
3804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003805{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003806 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003807}
3808
// Concatenation of three [2, 3] tensors along dimension 1, producing a
// [2, 9] output: each batch row is the three inputs' rows joined end to end.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
3831
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003832LayerTestResult<float, 2> Concatenation2dDim1Test(
3833 armnn::IWorkloadFactory& workloadFactory,
3834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003835{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003836 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003837}
3838
// Concatenation along dimension 0 of inputs whose dim-0 sizes differ
// ([2,3] + [3,3] + [1,3] -> [6,3]); the non-concatenated dimension matches.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // Expected output: all of input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3909
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003910LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3911 armnn::IWorkloadFactory& workloadFactory,
3912 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003913{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003914 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3915 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003916}
3917
// Concatenation along dimension 1 of inputs whose dim-1 sizes differ
// ([2,3] + [2,5] + [2,1] -> [2,9]); the batch dimension matches.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // Each output row is the corresponding rows of the three inputs joined end to end.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3976
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003977LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3978 armnn::IWorkloadFactory& workloadFactory,
3979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003980{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003981 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3982 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003983}
3984
// Shared helper for the 3D concatenation tests: concatenates three fixed
// [2, 3, 2] inputs along 'dimension' into the caller-supplied output shape.
// 'useSubtensor' is forwarded to Concatenate to select sub-tensor handles.
// The caller fills in result.outputExpected for its chosen dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same [batch, channel, element] = [2, 3, 2] shape.
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
4072
// Concatenation of three [2, 3, 2] tensors along dimension 0, producing a
// [6, 3, 2] output: the inputs' batches are stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected output: input0's two batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4143
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004144LayerTestResult<float, 3> Concatenation3dDim0Test(
4145 armnn::IWorkloadFactory& workloadFactory,
4146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004147{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004148 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004149}
4150
// Concatenation of three [2, 3, 2] tensors along dimension 1, producing a
// [2, 9, 2] output: within each batch, the inputs' channels are stacked.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4221
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004222LayerTestResult<float, 3> Concatenation3dDim1Test(
4223 armnn::IWorkloadFactory& workloadFactory,
4224 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004225{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004226 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004227}
4228
// Concatenation of three [2, 3, 2] tensors along dimension 2 (the innermost
// dimension), producing a [2, 3, 6] output. 'useSubtensor' is exposed because
// some backends cannot create sub-tensors over the innermost dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    // Each channel row is the three inputs' corresponding rows joined end to end.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4264
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004265LayerTestResult<float, 3> Concatenation3dDim2Test(
4266 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004267 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4268 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004269{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004270 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4271 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004272}
4273
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004274template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004275LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4276 armnn::IWorkloadFactory& workloadFactory,
4277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4278 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004279 int32_t qOffset)
4280{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004281 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004282 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4283 // Batch 0, Channel 0
4284 1.0f, 2.0f,
4285
4286 // Batch 0, Channel 1
4287 3.0f, 4.0f,
4288
4289 // Batch 0, Channel 2
4290 5.0f, 6.0f,
4291
4292 // Batch 1, Channel 0
4293 19.0f, 20.0f,
4294
4295 // Batch 1, Channel 1
4296 21.0f, 22.0f,
4297
4298 // Batch 1, Channel 2
4299 23.0f, 24.0f
4300 }));
4301
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004302 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004303 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4304 // Batch 0, Channel 0
4305 7.0f, 8.0f,
4306
4307 // Batch 0, Channel 1
4308 9.0f, 10.0f,
4309
4310 // Batch 0, Channel 2
4311 11.0f, 12.0f,
4312 }));
4313
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004314 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004315 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4316 // Batch 0, Channel 0
4317 25.0f, 26.0f,
4318
4319 // Batch 0, Channel 1
4320 27.0f, 28.0f,
4321
4322 // Batch 0, Channel 2
4323 29.0f, 30.0f,
4324
4325 // Batch 1, Channel 0
4326 13.0f, 14.0f,
4327
4328 // Batch 1, Channel 1
4329 15.0f, 16.0f,
4330
4331 // Batch 1, Channel 2
4332 17.0f, 18.0f,
4333
4334 // Batch 2, Channel 0
4335 31.0f, 32.0f,
4336
4337 // Batch 2, Channel 1
4338 33.0f, 34.0f,
4339
4340 // Batch 2, Channel 2
4341 35.0f, 36.0f
4342 }));
4343
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004344 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004345 LayerTestResult<T, 3> result(outputTensorInfo);
4346
4347 std::vector<T> output;
4348 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004349 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004350 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4351 { input0.data(), input1.data(), input2.data() },
4352 outputTensorInfo,
4353 output.data(),
4354 0,
4355 true);
telsoa014fcda012018-03-09 14:13:49 +00004356
4357 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4358 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4359 // Batch 0, Channel 0
4360 1.0f, 2.0f,
4361
4362 // Batch 0, Channel 1
4363 3.0f, 4.0f,
4364
4365 // Batch 0, Channel 2
4366 5.0f, 6.0f,
4367
4368 // Batch 1, Channel 0
4369 19.0f, 20.0f,
4370
4371 // Batch 1, Channel 1
4372 21.0f, 22.0f,
4373
4374 // Batch 1, Channel 2
4375 23.0f, 24.0f,
4376
4377 // Batch 2, Channel 0
4378 7.0f, 8.0f,
4379
4380 // Batch 2, Channel 1
4381 9.0f, 10.0f,
4382
4383 // Batch 2, Channel 2
4384 11.0f, 12.0f,
4385
4386 // Batch 3, Channel 0
4387 25.0f, 26.0f,
4388
4389 // Batch 3, Channel 1
4390 27.0f, 28.0f,
4391
4392 // Batch 3, Channel 2
4393 29.0f, 30.0f,
4394
4395 // Batch 4, Channel 0
4396 13.0f, 14.0f,
4397
4398 // Batch 4, Channel 1
4399 15.0f, 16.0f,
4400
4401 // Batch 4, Channel 2
4402 17.0f, 18.0f,
4403
4404 // Batch 5, Channel 0
4405 31.0f, 32.0f,
4406
4407 // Batch 5, Channel 1
4408 33.0f, 34.0f,
4409
4410 // Batch 5, Channel 2
4411 35.0f, 36.0f
4412 }));
4413
4414 return result;
4415}
4416
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004417LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4418 armnn::IWorkloadFactory& workloadFactory,
4419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004421 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4422 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004423}
4424
// Concatenation along dimension 1 of 3D inputs whose channel counts differ
// ([2,3,2] + [2,4,2] + [2,1,2] -> [2,8,2]); batch and width dimensions match.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Per batch: input0's 3 channels, then input1's 4, then input2's 1.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
4555
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004556LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4557 armnn::IWorkloadFactory& workloadFactory,
4558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004559{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004560 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4561 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004562}
4563
// Concatenates three 3D tensors of differing innermost sizes ({2,3,2}, {2,3,1}
// and {2,3,3}) along dimension 2 and validates the merged {2,3,6} output.
// qScale/qOffset quantise the float reference data for integer ArmnnTypes;
// Float32 callers pass 0.0f/0. useSubtensor lets the backend realise the
// concatenation through sub-tensor views instead of copies.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    // Second input is narrower (width 1) along the concatenation axis.
    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    // Third input is wider (width 3) along the concatenation axis.
    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output width is the sum of input widths: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,              // concatenation dimension
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Each row of the expected output is input0's pair, then input1's single
    // value, then input2's triple, for the matching batch/channel.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
4671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004672LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4673 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4675 bool useSubtensor)
4676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004677 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4678 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004679}
4680
// Shared driver for the 4D concatenation tests: concatenates three identical
// {1,3,2,2} inputs along `dimension` into the caller-supplied
// outputTensorInfo. Fills result.output only; the caller is responsible for
// setting result.outputExpected for its particular dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same shape; only their payloads differ.
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
4737
// Concatenation of three {1,3,2,2} tensors along dimension 0 (batch):
// the {3,3,2,2} output is simply the three inputs stacked back to back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: input0's data, then input1's, then input2's, unchanged.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
4774
4775LayerTestResult<float, 4> Concatenation4dDim0Test(
4776 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004778{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004779 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004780}
4781
// Concatenation of three {1,3,2,2} tensors along dimension 1 (channels):
// the {1,9,2,2} output holds the channel planes of the inputs in order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // With a single batch, channel concatenation is again a plain stacking
    // of the three input payloads.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4819
4820LayerTestResult<float, 4> Concatenation4dDim1Test(
4821 armnn::IWorkloadFactory& workloadFactory,
4822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4823{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004824 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004825}
4826
// Concatenation of three {1,3,2,2} tensors along dimension 2 (height):
// for each channel the {1,3,6,2} output interleaves the two rows of each
// input in turn.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    // Per channel: input0's rows, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4864
4865LayerTestResult<float, 4> Concatenation4dDim2Test(
4866 armnn::IWorkloadFactory& workloadFactory,
4867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4868{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004869 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004870}
4871
// Concatenation of three {1,3,2,2} tensors along dimension 3 (width):
// each row of the {1,3,2,6} output is the corresponding row of input0,
// input1 and input2 joined end to end. useSubtensor selects the
// sub-tensor-view implementation path where the backend supports it.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    // Each expected row: input0 pair, input1 pair, input2 pair.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
4910
4911LayerTestResult<float, 4> Concatenation4dDim3Test(
4912 armnn::IWorkloadFactory& workloadFactory,
4913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4914 bool useSubtensor)
4915{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004916 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4917 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004918}
4919
// Concatenation along dimension 0 (batch) of two inputs with different
// batch sizes: {1,3,2,2} and {2,3,2,2} merge into a {3,3,2,2} output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input carries two batches.
    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    // Output batch count is 1 + 2 = 3.
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: input0's single batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4999
5000LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5001 armnn::IWorkloadFactory& workloadFactory,
5002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5003{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005004 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5005 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005006}
5007
// Concatenation along dimension 1 (channels) of two inputs with different
// channel counts: {1,3,2,2} and {1,2,2,2} merge into a {1,5,2,2} output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has only two channels.
    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channel count is 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: input0's three channel planes followed by input1's two.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5068
5069LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5070 armnn::IWorkloadFactory& workloadFactory,
5071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5072{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005073 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5074 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005075}
5076
// Concatenation along dimension 2 (height) of two inputs with different
// heights: {1,3,2,2} and {1,3,3,2} merge into a {1,3,5,2} output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input is taller: three rows per channel.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Per channel: input0's two rows followed by input1's three rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5148
5149LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5150 armnn::IWorkloadFactory& workloadFactory,
5151 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5152{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005153 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5154 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005155}
5156
// Concatenation along dimension 3 (width) of two inputs with different
// widths: {1,3,2,2} and {1,3,2,3} merge into a {1,3,2,5} output.
// useSubtensor selects the sub-tensor-view implementation path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input is wider: three values per row.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output width is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Each expected row: input0's pair followed by input1's triple.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5217
5218LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5219 armnn::IWorkloadFactory& workloadFactory,
5220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5221 bool useSubtensor)
5222{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005223 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5224 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005225}
5226
// ResizeBilinear with identical input and output dimensions (1x2x4x4):
// the resize is a no-op, so the output must equal the input exactly.
// dataLayout selects NCHW or NHWC; the reference data below is authored
// in NCHW order and permuted when the NHWC layout is requested.
LayerTestResult<float, 4> ResizeBilinearNopTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);

    // Same N/C/H/W extents as the input: nothing to interpolate.
    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);

    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f,

        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    });

    // Reorder the NCHW-authored reference data when running in NHWC.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    // No-op resize: expected output is the (possibly permuted) input itself.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5284
// ResizeBilinear downscaling a 2x2 spatial extent to 1x1 (two channels).
// Checks the top-left-corner projection convention described below.
// dataLayout selects NCHW or NHWC; reference data is authored in NCHW and
// permuted when NHWC is requested.
LayerTestResult<float, 4> SimpleResizeBilinearTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);

    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);

    std::vector<float> inputData({
        1.0f, 255.0f,
        200.0f, 250.0f,

        250.0f, 200.0f,
        250.0f, 1.0f
    });

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
    // which we would expect if projecting the centre).

    // One value per channel: the (0,0) element of each input channel.
    std::vector<float> outputData({
        1.0f,

        250.0f
    });

    // Reorder the NCHW-authored input and expected data when running in NHWC.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5354
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005355LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5356 armnn::IWorkloadFactory& workloadFactory,
5357 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005358 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005359{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005360 const armnn::TensorInfo inputTensorInfo =
5361 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5362
5363 const armnn::TensorInfo outputTensorInfo =
5364 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005365
James Conroy6b965822018-11-01 11:33:09 +00005366 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005367 1.0f, 2.0f, 3.0f, 4.0f,
5368 2.0f, 3.0f, 4.0f, 5.0f,
5369 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005370 4.0f, 5.0f, 6.0f, 7.0f,
5371
5372 7.0f, 6.0f, 5.0f, 4.0f,
5373 6.0f, 5.0f, 4.0f, 3.0f,
5374 5.0f, 4.0f, 3.0f, 2.0f,
5375 4.0f, 3.0f, 2.0f, 1.0f
5376 });
5377
5378 std::vector<float> outputData({
5379 1.0f, 3.0f,
5380 3.0f, 5.0f,
5381
5382 7.0f, 5.0f,
5383 5.0f, 3.0f
5384 });
5385
5386 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005387 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005388 {
5389 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005390 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005391 inputData = tmp;
5392
5393 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005394 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005395 outputData = tmp1;
5396 }
5397
5398 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005399
telsoa014fcda012018-03-09 14:13:49 +00005400 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005401 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005402
5403 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5404 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5405
5406 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005407 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005408 armnn::WorkloadInfo info;
5409 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5410 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5411
5412 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5413
5414 inputHandle->Allocate();
5415 outputHandle->Allocate();
5416 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5417
Derek Lambertif30f7d32019-04-09 10:25:02 +01005418 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005419 workload->Execute();
5420
5421 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5422 return result;
5423}
5424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005425LayerTestResult<float, 4> ResizeBilinearMinTest(
5426 armnn::IWorkloadFactory& workloadFactory,
5427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005428 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005429{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005430 const armnn::TensorInfo inputTensorInfo =
5431 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
5432
5433 const armnn::TensorInfo outputTensorInfo =
5434 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005435
James Conroy6b965822018-11-01 11:33:09 +00005436 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005437 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
5438 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00005439 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
5440
5441 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
5442 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
5443 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
5444 });
5445
5446 std::vector<float> outputData({
5447 1.0f, 2.6666f, 6.00f,
5448 78.5f, 179.3333f, 401.00f,
5449
5450 987.0f, 454.6670f, 203.33f,
5451 48.5f, 22.3333f, 10.00f
5452 });
5453
5454 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005455 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005456 {
5457 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005458 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005459 inputData = tmp;
5460
5461 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005462 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005463 outputData = tmp1;
5464 }
5465
5466 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005467
5468 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005469 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005470
5471 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5472 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5473
5474 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005475 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005476 armnn::WorkloadInfo info;
5477 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5478 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5479
5480 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5481
5482 inputHandle->Allocate();
5483 outputHandle->Allocate();
5484 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5485
Derek Lambertif30f7d32019-04-09 10:25:02 +01005486 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005487 workload->Execute();
5488
5489 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5490 return result;
5491}
5492
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005493LayerTestResult<float, 4> ResizeBilinearMagTest(
5494 armnn::IWorkloadFactory& workloadFactory,
5495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005496 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005497{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005498 const armnn::TensorInfo inputTensorInfo =
5499 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
5500
5501 const armnn::TensorInfo outputTensorInfo =
5502 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005503
James Conroy6b965822018-11-01 11:33:09 +00005504 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005505 1.0f, 2.0f,
5506 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005507 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00005508
James Conroy6b965822018-11-01 11:33:09 +00005509 233.0f, 144.0f,
5510 21.0f, 13.0f,
5511 2.0f, 1.0f
5512 });
5513
5514 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01005515 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
5516 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005517 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
5518
5519 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
5520 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
5521 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
5522 });
5523
5524 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005525 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005526 {
5527 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005528 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005529 inputData = tmp;
5530
5531 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005532 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005533 outputData = tmp1;
5534 }
5535
5536 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5537
5538 LayerTestResult<float, 4> result(outputTensorInfo);
5539 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005540
5541 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5542 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5543
5544 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005545 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005546 armnn::WorkloadInfo info;
5547 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5548 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5549
5550 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5551
5552 inputHandle->Allocate();
5553 outputHandle->Allocate();
5554 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5555
Derek Lambertif30f7d32019-04-09 10:25:02 +01005556 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005557 workload->Execute();
5558
5559 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5560 return result;
5561}
5562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005563LayerTestResult<float, 2> FakeQuantizationTest(
5564 armnn::IWorkloadFactory& workloadFactory,
5565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005566{
5567 constexpr unsigned int width = 2;
5568 constexpr unsigned int height = 3;
5569
5570 const armnn::TensorInfo tensorInfo({height, width },
5571 armnn::DataType::Float32);
5572 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5573 -10.0f, -5.0f,
5574 0.0f, 5.0f,
5575 10.0f, 10.0f
5576 }));
5577
5578 LayerTestResult<float, 2> ret(tensorInfo);
5579
5580 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5581
5582 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5583
5584 armnn::FakeQuantizationQueueDescriptor data;
5585 armnn::WorkloadInfo info;
5586
5587 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5588 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5589 float min = -10.f;
5590 float max = 10.f;
5591
5592 data.m_Parameters.m_Min = min;
5593 data.m_Parameters.m_Max = max;
5594
5595 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5596 armnn::FakeQuantizationQueueDescriptor refData = data;
5597 armnn::WorkloadInfo refInfo = info;
5598 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5599
5600 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5601
5602 inputHandle->Allocate();
5603 outputHandle->Allocate();
5604
5605 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5606
Derek Lambertif30f7d32019-04-09 10:25:02 +01005607 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005608 workload->Execute();
5609
5610 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5611
5612 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5613 0.0f, 63.0f,
5614 128.0f, 191.0f,
5615 255.0f, 255.0f
5616 }));
5617 return ret;
5618}
5619
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005620namespace
5621{
5622
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005623LayerTestResult<float, 4> L2NormalizationTestImpl(
5624 armnn::IWorkloadFactory& workloadFactory,
5625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5626 const armnn::TensorShape& inputOutputTensorShape,
5627 const std::vector<float>& inputValues,
5628 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005629 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005630{
5631 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5632 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5633
jimfly013aab7c32018-11-12 13:32:08 +00005634 // at this point if we require it permute the input data
5635 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5636 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005637 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005638 {
5639 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005640 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005641 inputData = tmp;
5642 }
5643
5644 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005645
5646 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005647 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005648 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005649 {
5650 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005651 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5652 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005653 expectedOutputData = tmp;
5654 }
5655 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005656
5657 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5658 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5659
5660 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005661 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005662 armnn::WorkloadInfo info;
5663
5664 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5665 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5666
5667 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5668
5669 inputHandle->Allocate();
5670 outputHandle->Allocate();
5671
5672 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5673
Derek Lambertif30f7d32019-04-09 10:25:02 +01005674 workload->PostAllocationConfigure();
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005675 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005676
5677 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5678
5679 return result;
5680}
5681
5682float CalcInvL2Norm(std::initializer_list<float> elements)
5683{
5684 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5685 [](float acc, float element) { return acc + element * element; });
5686 return 1.0f / sqrtf(reduction);
5687}
5688
5689} // anonymous namespace
5690
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005691template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005692LayerTestResult<T, 2> Pad2dTestCommon(
5693 armnn::IWorkloadFactory& workloadFactory,
5694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5695 float qScale,
5696 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005697{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005698 const armnn::TensorShape inputShape{ 3, 3 };
5699 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005700
Derek Lambertif30f7d32019-04-09 10:25:02 +01005701 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5702 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005703
Derek Lambertif30f7d32019-04-09 10:25:02 +01005704 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005705 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005706 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005707 // Height (3) x Width (3)
5708 4, 8, 6,
5709 7, 4, 4,
5710 3, 2, 4
5711 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005712
Derek Lambertif30f7d32019-04-09 10:25:02 +01005713 std::vector<T> expectedOutputValues(
5714 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005715 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005716 0, 0, 0, 0, 0, 0, 0,
5717 0, 0, 0, 0, 0, 0, 0,
5718 0, 0, 4, 8, 6, 0, 0,
5719 0, 0, 7, 4, 4, 0, 0,
5720 0, 0, 3, 2, 4, 0, 0,
5721 0, 0, 0, 0, 0, 0, 0,
5722 0, 0, 0, 0, 0, 0, 0
5723 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005724
Derek Lambertif30f7d32019-04-09 10:25:02 +01005725 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005726
Derek Lambertif30f7d32019-04-09 10:25:02 +01005727 LayerTestResult<T, 2> result(outputTensorInfo);
5728 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005729
Derek Lambertif30f7d32019-04-09 10:25:02 +01005730 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5731 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005732
Derek Lambertif30f7d32019-04-09 10:25:02 +01005733 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005734
Derek Lambertif30f7d32019-04-09 10:25:02 +01005735 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5736 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5737 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005738
Derek Lambertif30f7d32019-04-09 10:25:02 +01005739 descriptor.m_Parameters.m_PadList = PadList;
5740 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005741
Derek Lambertif30f7d32019-04-09 10:25:02 +01005742 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5743 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005744
Derek Lambertif30f7d32019-04-09 10:25:02 +01005745 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005746
Derek Lambertif30f7d32019-04-09 10:25:02 +01005747 inputHandle->Allocate();
5748 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005749
Derek Lambertif30f7d32019-04-09 10:25:02 +01005750 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005751
Derek Lambertif30f7d32019-04-09 10:25:02 +01005752 workload->PostAllocationConfigure();
5753 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005754
Derek Lambertif30f7d32019-04-09 10:25:02 +01005755 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005756
Derek Lambertif30f7d32019-04-09 10:25:02 +01005757 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005758}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005759
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005760template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005761LayerTestResult<T, 3> Pad3dTestCommon(
5762 armnn::IWorkloadFactory& workloadFactory,
5763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5764 float qScale,
5765 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005766{
5767 const armnn::TensorShape inputShape{ 2, 2, 2 };
5768 const armnn::TensorShape outputShape{ 3, 5, 6 };
5769
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005770 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5771 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005772
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005773 std::vector<T> inputValues(
5774 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005775 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005776 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005777 0, 4,
5778 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005779
5780 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005781 6, 1,
5782 5, 2
5783 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005784
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005785 std::vector<T> expectedOutputValues(
5786 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005787 {
5788
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005789 0, 0, 0, 0, 0, 0,
5790 0, 0, 0, 0, 0, 0,
5791 0, 0, 0, 4, 0, 0,
5792 0, 0, 2, 5, 0, 0,
5793 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005794
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005795 0, 0, 0, 0, 0, 0,
5796 0, 0, 0, 0, 0, 0,
5797 0, 0, 6, 1, 0, 0,
5798 0, 0, 5, 2, 0, 0,
5799 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005800
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005801 0, 0, 0, 0, 0, 0,
5802 0, 0, 0, 0, 0, 0,
5803 0, 0, 0, 0, 0, 0,
5804 0, 0, 0, 0, 0, 0,
5805 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005806
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005807 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005808
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005809 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005810
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005811 LayerTestResult<T, 3> result(outputTensorInfo);
5812 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005813
5814 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5815 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5816
5817 armnn::PadQueueDescriptor descriptor;
5818
5819 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5820 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5821 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5822 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5823
5824 descriptor.m_Parameters.m_PadList = PadList;
5825 armnn::WorkloadInfo info;
5826
5827 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5828 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5829
5830 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5831
5832 inputHandle->Allocate();
5833 outputHandle->Allocate();
5834
5835 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5836
Derek Lambertif30f7d32019-04-09 10:25:02 +01005837 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005838 workload->Execute();
5839
5840 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5841
5842 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005843}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005844
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005845template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005846LayerTestResult<T, 4> Pad4dTestCommon(
5847 armnn::IWorkloadFactory& workloadFactory,
5848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5849 float qScale,
5850 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005851{
5852 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5853 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5854
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005855 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5856 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005857
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005858 std::vector<T> inputValues(
5859 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005860 {
5861 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005862 0, 1,
5863 2, 3,
5864 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005865
5866 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005867 6, 7,
5868 8, 9,
5869 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005870
5871 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005872 12, 13,
5873 14, 15,
5874 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005875
5876 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005877 18, 19,
5878 20, 21,
5879 22, 23
5880 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005881
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005882 std::vector<T> expectedOutputValues(
5883 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005884 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005885 0, 0, 0, 0,
5886 0, 0, 0, 0,
5887 0, 0, 0, 0,
5888 0, 0, 0, 0,
5889 0, 0, 0, 0,
5890 0, 0, 0, 0,
5891 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005892
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005893 0, 0, 0, 0,
5894 0, 0, 0, 0,
5895 0, 0, 0, 0,
5896 0, 0, 0, 0,
5897 0, 0, 0, 0,
5898 0, 0, 0, 0,
5899 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005900
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005901 0, 0, 0, 0,
5902 0, 0, 0, 0,
5903 0, 0, 0, 0,
5904 0, 0, 0, 0,
5905 0, 0, 0, 0,
5906 0, 0, 0, 0,
5907 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005908
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005909 0, 0, 0, 0,
5910 0, 0, 0, 0,
5911 0, 0, 0, 0,
5912 0, 0, 0, 0,
5913 0, 0, 0, 0,
5914 0, 0, 0, 0,
5915 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005916
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005917 0, 0, 0, 0,
5918 0, 0, 0, 0,
5919 0, 0, 0, 0,
5920 0, 0, 0, 0,
5921 0, 0, 0, 0,
5922 0, 0, 0, 0,
5923 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005924
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005925 0, 0, 0, 0,
5926 0, 0, 0, 0,
5927 0, 0, 0, 0,
5928 0, 0, 0, 0,
5929 0, 0, 0, 0,
5930 0, 0, 0, 0,
5931 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005932
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005933 0, 0, 0, 0,
5934 0, 0, 0, 0,
5935 0, 0, 0, 0,
5936 0, 0, 0, 0,
5937 0, 0, 0, 0,
5938 0, 0, 0, 0,
5939 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005940
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005941 0, 0, 0, 0,
5942 0, 0, 0, 0,
5943 0, 0, 0, 0,
5944 0, 0, 1, 0,
5945 0, 2, 3, 0,
5946 0, 4, 5, 0,
5947 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005948
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005949 0, 0, 0, 0,
5950 0, 0, 0, 0,
5951 0, 0, 0, 0,
5952 0, 6, 7, 0,
5953 0, 8, 9, 0,
5954 0, 10, 11, 0,
5955 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005956
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005957 0, 0, 0, 0,
5958 0, 0, 0, 0,
5959 0, 0, 0, 0,
5960 0, 0, 0, 0,
5961 0, 0, 0, 0,
5962 0, 0, 0, 0,
5963 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005964
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005965 0, 0, 0, 0,
5966 0, 0, 0, 0,
5967 0, 0, 0, 0,
5968 0, 0, 0, 0,
5969 0, 0, 0, 0,
5970 0, 0, 0, 0,
5971 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005972
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005973 0, 0, 0, 0,
5974 0, 0, 0, 0,
5975 0, 0, 0, 0,
5976 0, 0, 0, 0,
5977 0, 0, 0, 0,
5978 0, 0, 0, 0,
5979 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005980
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005981 0, 0, 0, 0,
5982 0, 0, 0, 0,
5983 0, 0, 0, 0,
5984 0, 12, 13, 0,
5985 0, 14, 15, 0,
5986 0, 16, 17, 0,
5987 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005988
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005989 0, 0, 0, 0,
5990 0, 0, 0, 0,
5991 0, 0, 0, 0,
5992 0, 18, 19, 0,
5993 0, 20, 21, 0,
5994 0, 22, 23, 0,
5995 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005996
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005997 0, 0, 0, 0,
5998 0, 0, 0, 0,
5999 0, 0, 0, 0,
6000 0, 0, 0, 0,
6001 0, 0, 0, 0,
6002 0, 0, 0, 0,
6003 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006004
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006005 0, 0, 0, 0,
6006 0, 0, 0, 0,
6007 0, 0, 0, 0,
6008 0, 0, 0, 0,
6009 0, 0, 0, 0,
6010 0, 0, 0, 0,
6011 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006012
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006013 0, 0, 0, 0,
6014 0, 0, 0, 0,
6015 0, 0, 0, 0,
6016 0, 0, 0, 0,
6017 0, 0, 0, 0,
6018 0, 0, 0, 0,
6019 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006020
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006021 0, 0, 0, 0,
6022 0, 0, 0, 0,
6023 0, 0, 0, 0,
6024 0, 0, 0, 0,
6025 0, 0, 0, 0,
6026 0, 0, 0, 0,
6027 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006028
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006029 0, 0, 0, 0,
6030 0, 0, 0, 0,
6031 0, 0, 0, 0,
6032 0, 0, 0, 0,
6033 0, 0, 0, 0,
6034 0, 0, 0, 0,
6035 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006036
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006037 0, 0, 0, 0,
6038 0, 0, 0, 0,
6039 0, 0, 0, 0,
6040 0, 0, 0, 0,
6041 0, 0, 0, 0,
6042 0, 0, 0, 0,
6043 0, 0, 0, 0
6044 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006045
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006046 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006047
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006048 LayerTestResult<T, 4> result(outputTensorInfo);
6049 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006050
6051 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6052 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6053
6054 armnn::PadQueueDescriptor descriptor;
6055
6056 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6057 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6058 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6059 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6060 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6061
6062 descriptor.m_Parameters.m_PadList = PadList;
6063 armnn::WorkloadInfo info;
6064
6065 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6066 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6067
6068 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6069
6070 inputHandle->Allocate();
6071 outputHandle->Allocate();
6072
6073 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6074
Derek Lambertif30f7d32019-04-09 10:25:02 +01006075 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006076 workload->Execute();
6077
6078 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6079
6080 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006081}
6082
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006083LayerTestResult<uint8_t, 2> PadUint82dTest(
6084 armnn::IWorkloadFactory& workloadFactory,
6085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006086{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006087 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006088}
6089
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006090LayerTestResult<uint8_t, 3> PadUint83dTest(
6091 armnn::IWorkloadFactory& workloadFactory,
6092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006093{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006094 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006095}
6096
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006097LayerTestResult<uint8_t, 4> PadUint84dTest(
6098 armnn::IWorkloadFactory& workloadFactory,
6099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006100{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006101 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006102}
6103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006104LayerTestResult<float, 2> PadFloat322dTest(
6105 armnn::IWorkloadFactory& workloadFactory,
6106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006107{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006108 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006109}
6110
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006111LayerTestResult<float, 3> PadFloat323dTest(
6112 armnn::IWorkloadFactory& workloadFactory,
6113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006114{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006115 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006116}
6117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006118LayerTestResult<float, 4> PadFloat324dTest(
6119 armnn::IWorkloadFactory& workloadFactory,
6120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006121{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006122 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006123}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006124
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006125LayerTestResult<float, 4> L2Normalization1dTest(
6126 armnn::IWorkloadFactory& workloadFactory,
6127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006128 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006129{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006130 // Width: 1
6131 // Height: 1
6132 // Channels: 10
6133 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006134 unsigned int numberOfBatches = 1;
6135 unsigned int numberOfChannels = 10;
6136 unsigned int height = 1;
6137 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006138
jimfly013aab7c32018-11-12 13:32:08 +00006139
Nina Drozdd41b2592018-11-19 13:03:36 +00006140 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006141 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006142 std::vector<float> inputValues
6143 {
6144 // Batch 0, Channel 0, Height (1) x Width (1)
6145 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006146
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006147 // Batch 0, Channel 1, Height (1) x Width (1)
6148 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006149
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006150 // Batch 0, Channel 2, Height (1) x Width (1)
6151 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006152
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006153 // Batch 0, Channel 3, Height (1) x Width (1)
6154 4.0f,
6155
6156 // Batch 0, Channel 4, Height (1) x Width (1)
6157 5.0f,
6158
6159 // Batch 0, Channel 5, Height (1) x Width (1)
6160 6.0f,
6161
6162 // Batch 0, Channel 6, Height (1) x Width (1)
6163 7.0f,
6164
6165 // Batch 0, Channel 7, Height (1) x Width (1)
6166 8.0f,
6167
6168 // Batch 0, Channel 8, Height (1) x Width (1)
6169 9.0f,
6170
6171 // Batch 0, Channel 9, Height (1) x Width (1)
6172 10.0f
6173 };
telsoa014fcda012018-03-09 14:13:49 +00006174 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006175 std::vector<float> expectedOutputValues
6176 {
6177 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00006178 1.0f * approxInvL2Norm,
6179 2.0f * approxInvL2Norm,
6180 3.0f * approxInvL2Norm,
6181 4.0f * approxInvL2Norm,
6182 5.0f * approxInvL2Norm,
6183 6.0f * approxInvL2Norm,
6184 7.0f * approxInvL2Norm,
6185 8.0f * approxInvL2Norm,
6186 9.0f * approxInvL2Norm,
6187 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006188 };
telsoa014fcda012018-03-09 14:13:49 +00006189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006190
6191 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006192 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006193}
6194
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006195LayerTestResult<float, 4> L2Normalization2dTest(
6196 armnn::IWorkloadFactory& workloadFactory,
6197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006198 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006199{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006200 // Width: 5
6201 // Height: 1
6202 // Channels: 2
6203 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006204 unsigned int numberOfBatches = 1;
6205 unsigned int numberOfChannels = 2;
6206 unsigned int height = 1;
6207 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006208
Nina Drozdd41b2592018-11-19 13:03:36 +00006209 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006210 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006211 std::vector<float> inputValues
6212 {
6213 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006214 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006215
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006216 // Batch 0, Channel 1, Height (1) x Width (5)
6217 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6218 };
6219 std::vector<float> expectedOutputValues
6220 {
6221 // Batch 0, Channel 0, Height (1) x Width (5)
6222 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6223 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6224 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6225 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006226 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
6227
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006228 // Batch 0, Channel 1, Height (1) x Width (5)
6229 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6230 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6231 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6232 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006233 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006234 };
telsoa014fcda012018-03-09 14:13:49 +00006235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006236 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006237 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006238}
telsoa014fcda012018-03-09 14:13:49 +00006239
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006240LayerTestResult<float, 4> L2Normalization3dTest(
6241 armnn::IWorkloadFactory& workloadFactory,
6242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006243 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006244{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006245 // Width: 3
6246 // Height: 4
6247 // Channels: 2
6248 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006249 unsigned int numberOfBatches = 1;
6250 unsigned int numberOfChannels = 2;
6251 unsigned int height = 4;
6252 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006253
Nina Drozdd41b2592018-11-19 13:03:36 +00006254 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006255 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006256 std::vector<float> inputValues
6257 {
6258 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006259 119.0f, 21.0f, 150.0f,
6260 149.0f, 32.0f, 179.0f,
6261 15.0f, 227.0f, 141.0f,
6262 147.0f, 199.0f, 220.0f,
6263
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006264 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006265 110.0f, 140.0f, 73.0f,
6266 211.0f, 212.0f, 89.0f,
6267 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006268 162.0f, 12.0f, 161.0f
6269 };
6270 std::vector<float> expectedOutputValues
6271 {
6272 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006273 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6274 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6275 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6276 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6277 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6278 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6279 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6280 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6281 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6282 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6283 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6284 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6285
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006286 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006287 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6288 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6289 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6290 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6291 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6292 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6293 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6294 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6295 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6296 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6297 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006298 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6299 };
telsoa014fcda012018-03-09 14:13:49 +00006300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006301 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006302 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006303}
telsoa014fcda012018-03-09 14:13:49 +00006304
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006305LayerTestResult<float, 4> L2Normalization4dTest(
6306 armnn::IWorkloadFactory& workloadFactory,
6307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006308 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006309{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006310 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006311 // Height: 4
6312 // Channels: 3
6313 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006314 unsigned int numberOfBatches = 2;
6315 unsigned int numberOfChannels = 3;
6316 unsigned int height = 4;
6317 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006318
Nina Drozdd41b2592018-11-19 13:03:36 +00006319 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006320 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006321 std::vector<float> inputValues
6322 {
6323 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006324 235.0f, 46.0f, 178.0f,
6325 100.0f, 123.0f, 19.0f,
6326 172.0f, 74.0f, 250.0f,
6327 6.0f, 195.0f, 80.0f,
6328
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006329 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006330 113.0f, 95.0f, 202.0f,
6331 77.0f, 114.0f, 71.0f,
6332 122.0f, 246.0f, 166.0f,
6333 82.0f, 28.0f, 37.0f,
6334
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006335 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006336 56.0f, 170.0f, 162.0f,
6337 194.0f, 89.0f, 254.0f,
6338 12.0f, 209.0f, 200.0f,
6339 1.0f, 64.0f, 54.0f,
6340
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006341 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006342 67.0f, 90.0f, 49.0f,
6343 7.0f, 163.0f, 18.0f,
6344 25.0f, 117.0f, 103.0f,
6345 247.0f, 59.0f, 189.0f,
6346
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006347 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006348 239.0f, 104.0f, 199.0f,
6349 17.0f, 124.0f, 153.0f,
6350 222.0f, 217.0f, 75.0f,
6351 32.0f, 126.0f, 21.0f,
6352
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006353 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006354 97.0f, 145.0f, 215.0f,
6355 115.0f, 116.0f, 238.0f,
6356 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006357 92.0f, 125.0f, 88.0f
6358 };
6359 std::vector<float> expectedOutputValues
6360 {
6361 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006362 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6363 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6364 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6365 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6366 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6367 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6368 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6369 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6370 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6371 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6372 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6373 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6374
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006375 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006376 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6377 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6378 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6379 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6380 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6381 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6382 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6383 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6384 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6385 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6386 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6387 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6388
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006389 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006390 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6391 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6392 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6393 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6394 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6395 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6396 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6397 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6398 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6399 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6400 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6401 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6402
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006403 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006404 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6405 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6406 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6407 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6408 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6409 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6410 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6411 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6412 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6413 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6414 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6415 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6416
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006417 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006418 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6419 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6420 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6421 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6422 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6423 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6424 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6425 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6426 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6427 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6428 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6429 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6430
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006431 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006432 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6433 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6434 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6435 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6436 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6437 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6438 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6439 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6440 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6441 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6442 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006443 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
6444 };
telsoa014fcda012018-03-09 14:13:49 +00006445
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006446 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006447 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006448}
6449
// Exercises a Constant workload: the layer's fixed tensor (held in a
// ScopedCpuTensorHandle) is expected to be copied verbatim to the output, so
// the expected result is the input tensor itself.
// ArmnnType selects the data type; qScale/qOffset are applied when T is a
// quantized type. NOTE(review): memoryManager is not referenced in this body —
// presumably kept for signature symmetry with the other test impls.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // Constant simply emits its stored tensor, so output dims mirror the input.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // (The constructors above already received qScale/qOffset; this re-applies
    // them explicitly for quantized types.)
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Arbitrary NCHW payload, quantized according to qScale/qOffset when T is
    // a quantized type.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // A Constant layer must reproduce its stored tensor exactly.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant payload lives in a CPU tensor handle owned by the descriptor.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // Constant has no inputs — only the output is registered on the workload.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    // Configure after allocation, then run and read back the produced tensor.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6545
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006546LayerTestResult<float, 4> ConstantTest(
6547 armnn::IWorkloadFactory& workloadFactory,
6548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006549{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006550 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006551}
6552
Nina Drozd58ef2c62019-05-16 12:09:18 +01006553LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6554 armnn::IWorkloadFactory& workloadFactory,
6555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6556{
6557 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6558}
6559
6560LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006561 armnn::IWorkloadFactory& workloadFactory,
6562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006563{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006564 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006565}
6566
// Concatenates two QAsymm8 tensors that carry DIFFERENT quantisation
// parameters along the channel dimension. The output shares input1's
// parameters, so input1's raw bytes pass through unchanged while input2 must
// be requantised (visible in the 176..197 tail of the expected output).
// Sub-tensor views are used when the factory supports them, otherwise
// separate input handles are created.
// NOTE(review): memoryManager is not referenced in this body — presumably kept
// for signature symmetry with the other tests.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is the channel-wise concatenation: 2 + 1 = 3 channels.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required.
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First 36 values: input1 copied verbatim (same qparams as the output).
    // Last 18 values: input2 requantised from (0.019608, 50) to (0.015686, 192).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, directly after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When supported, the inputs are views into the output tensor at their
    // respective window origins; otherwise standalone handles are used.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // Configure after allocation, then run and read back the produced tensor.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6709
// Concatenates a 2-channel and a 1-channel QAsymm8 tensor (both 6 high x 3 wide)
// along the channel dimension and validates the 3-channel result.
// All tensors deliberately share the same quantization parameters, since Concat
// copies quantized data verbatim without re-quantizing.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected result: the two channels of input1 followed by the single channel of input2.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Where the backend supports it, create the inputs as sub-tensor views directly
    // into the output tensor at their window origins; otherwise use stand-alone handles.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6845
// QSymm16 counterpart of ConcatUint8Test: concatenates a 2-channel and a
// 1-channel tensor (both 6 high x 3 wide) along the channel dimension.
// Quantization parameters are shared and arbitrary, since Concat copies
// quantized data verbatim without re-quantizing.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected result: the two channels of input1 followed by the single channel of input2.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Where the backend supports it, create the inputs as sub-tensor views directly
    // into the output tensor at their window origins; otherwise use stand-alone handles.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00006978
surmeh01bceff2f2018-03-29 16:29:27 +01006979namespace
telsoa014fcda012018-03-09 14:13:49 +00006980{
Sadik Armagan2999a022019-04-09 14:20:12 +01006981template <typename T>
6982LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006983 armnn::IWorkloadFactory& workloadFactory,
6984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6985 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006986 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006987 float scale0,
6988 int32_t offset0,
6989 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006990 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006991 float scale1,
6992 int32_t offset1,
6993 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01006994 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006995 float outScale,
6996 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01006997{
Sadik Armagan2999a022019-04-09 14:20:12 +01006998 auto dataType = (std::is_same<T, uint8_t>::value ?
6999 armnn::DataType::QuantisedAsymm8 :
7000 armnn::DataType::QuantisedSymm16);
7001
7002 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
7003 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
7004 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00007005
surmeh01bceff2f2018-03-29 16:29:27 +01007006 inputTensorInfo0.SetQuantizationScale(scale0);
7007 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00007008
surmeh01bceff2f2018-03-29 16:29:27 +01007009 inputTensorInfo1.SetQuantizationScale(scale1);
7010 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00007011
surmeh01bceff2f2018-03-29 16:29:27 +01007012 outputTensorInfo.SetQuantizationScale(outScale);
7013 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00007014
Sadik Armagan2999a022019-04-09 14:20:12 +01007015 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7016 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00007017
Sadik Armagan2999a022019-04-09 14:20:12 +01007018 LayerTestResult<T, 4> result(outputTensorInfo);
7019 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7020
7021 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7022 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7023 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7024
7025 armnn::AdditionQueueDescriptor data;
7026 armnn::WorkloadInfo info;
7027 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7028 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7029 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7030
7031 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
7032
7033 inputHandle0->Allocate();
7034 inputHandle1->Allocate();
7035 outputHandle->Allocate();
7036
7037 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7038 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7039
Derek Lambertif30f7d32019-04-09 10:25:02 +01007040 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01007041 workload->Execute();
7042
7043 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7044
7045 return result;
7046}
7047} // anonymous namespace
7048
7049LayerTestResult<uint8_t, 4> AdditionUint8Test(
7050 armnn::IWorkloadFactory& workloadFactory,
7051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7052{
7053 const unsigned int shape0[] = { 1, 2, 2, 3 };
7054 const unsigned int shape1[] = { 1, 2, 2, 3 };
7055
7056 std::vector<uint8_t> input0(
7057 {
7058 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7059 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7060 });
7061
7062 std::vector<uint8_t> input1(
7063 {
7064 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7065 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7066 });
7067
7068 std::vector<uint8_t> output(
7069 {
7070 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7071 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7072 });
7073
7074 return AdditionQuantizeTestHelper(workloadFactory,
7075 memoryManager,
7076 shape0, input0, 7.0f, 3,
7077 shape1, input1, 7.0f, 3,
7078 shape0, output, 7.0f, 3);
7079}
7080
// Element-wise addition of two QSymm16 tensors (scale 7.0, offset 0).
// Dequantized values are shown to the right of each row; all sums fit
// comfortably inside the int16 range, so no saturation occurs.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    });

    std::vector<int16_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
        126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
    });

    std::vector<int16_t> output(
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254 (within int16 range; no clamping)
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7112
7113namespace
7114{
7115template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7116LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
7117 armnn::IWorkloadFactory& workloadFactory,
7118 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7119 const unsigned int shape0[4],
7120 const std::vector<T> & values0,
7121 float scale0,
7122 int32_t offset0,
7123 const unsigned int shape1[4],
7124 const std::vector<T> & values1,
7125 float scale1,
7126 int32_t offset1,
7127 const unsigned int outShape[4],
7128 const std::vector<T> & outValues,
7129 float outScale,
7130 int32_t outOffset)
7131{
7132 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7133 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7134 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
7135
7136 inputTensorInfo0.SetQuantizationScale(scale0);
7137 inputTensorInfo0.SetQuantizationOffset(offset0);
7138
7139 inputTensorInfo1.SetQuantizationScale(scale1);
7140 inputTensorInfo1.SetQuantizationOffset(offset1);
7141
7142 outputTensorInfo.SetQuantizationScale(outScale);
7143 outputTensorInfo.SetQuantizationOffset(outOffset);
7144
7145 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7146 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7147
7148 LayerTestResult<T, 4> result(outputTensorInfo);
7149 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00007150
surmeh01bceff2f2018-03-29 16:29:27 +01007151 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00007152 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00007153 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7154
7155 armnn::MultiplicationQueueDescriptor data;
7156 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01007157 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7158 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00007159 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7160
7161 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
7162
surmeh01bceff2f2018-03-29 16:29:27 +01007163 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007164 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007165 outputHandle->Allocate();
7166
surmeh01bceff2f2018-03-29 16:29:27 +01007167 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007168 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007169
Derek Lambertif30f7d32019-04-09 10:25:02 +01007170 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007171 workload->Execute();
7172
7173 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7174
7175 return result;
7176}
surmeh01bceff2f2018-03-29 16:29:27 +01007177} // anonymous namespace
7178
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007179LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7180 armnn::IWorkloadFactory& workloadFactory,
7181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007182{
7183 unsigned int batchSize = 1;
7184 unsigned int channels = 2;
7185 unsigned int height = 2;
7186 unsigned int width = 3;
7187 const unsigned int shape[] = { batchSize, channels, height, width };
7188
telsoa01c577f2c2018-08-31 09:22:23 +01007189 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007190 std::vector<uint8_t> input0({
7191 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7192 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7193 });
7194
telsoa01c577f2c2018-08-31 09:22:23 +01007195 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007196 std::vector<uint8_t> input1({
7197 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7198 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7199 });
7200
telsoa01c577f2c2018-08-31 09:22:23 +01007201 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007202 std::vector<uint8_t> output(
7203 {
7204 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7205 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7206 });
7207
Sadik Armagan2999a022019-04-09 14:20:12 +01007208 // Scale/offset chosen to have output values out of range.
7209 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7210 memoryManager,
7211 shape,
7212 input0,
7213 4.0f,
7214 1,
7215 shape,
7216 input1,
7217 3.0f,
7218 -2,
7219 shape,
7220 output,
7221 1366.255f,
7222 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007223}
7224
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007225LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7226 armnn::IWorkloadFactory& workloadFactory,
7227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007228{
7229 const unsigned int shape0[] = { 1, 2, 2, 3 };
7230 const unsigned int shape1[] = { 1, 1, 1, 1 };
7231
7232 std::vector<uint8_t> input0({
7233 1, 2, 3, 4, 5, 6,
7234 7, 8, 9, 10, 11, 12
7235 });
7236
7237 std::vector<uint8_t> input1({2});
7238
7239 std::vector<uint8_t> output({
7240 2, 4, 6, 8, 10, 12,
7241 14, 16, 18, 20, 22, 24
7242 });
7243
Sadik Armagan2999a022019-04-09 14:20:12 +01007244 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7245 memoryManager,
7246 shape0,
7247 input0,
7248 1.0f,
7249 0,
7250 shape1,
7251 input1,
7252 1.0f,
7253 0,
7254 shape0,
7255 output,
7256 1.0f,
7257 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007258}
7259
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007260LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7261 armnn::IWorkloadFactory& workloadFactory,
7262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007263{
7264 const unsigned int shape0[] = { 1, 2, 2, 3 };
7265 const unsigned int shape1[] = { 1, 1, 1, 3 };
7266
7267 std::vector<uint8_t> input0({
7268 1, 2, 3, 4, 5, 6,
7269 7, 8, 9, 10, 11, 12
7270 });
7271
7272 std::vector<uint8_t> input1({1, 2, 3});
7273
7274 std::vector<uint8_t> output({
7275 1, 4, 9, 4, 10, 18,
7276 7, 16, 27, 10, 22, 36
7277 });
7278
Sadik Armagan2999a022019-04-09 14:20:12 +01007279 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7280 memoryManager,
7281 shape0,
7282 input0,
7283 1.0f,
7284 0,
7285 shape1,
7286 input1,
7287 1.0f,
7288 0,
7289 shape0,
7290 output,
7291 1.0f,
7292 0);
7293}
7294
7295LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7296 armnn::IWorkloadFactory& workloadFactory,
7297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7298{
7299 const unsigned int shape[] = { 1, 2, 2, 3 };
7300
7301 std::vector<int16_t> input0(
7302 {
7303 6, 7, 8, 9, 10, 11,
7304 12, 13, 14, 15, 16, 17
7305 });
7306
7307 std::vector<int16_t> input1(
7308 {
7309 1, 2, 3, 4, 5, 6,
7310 7, 8, 9, 10, 11, 12
7311 });
7312
7313 std::vector<int16_t> output(
7314 {
7315 6, 14, 24, 36, 50, 66,
7316 84, 104, 126, 150, 176, 204
7317 });
7318
7319 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7320 memoryManager,
7321 shape,
7322 input0,
7323 1.0f,
7324 0,
7325 shape,
7326 input1,
7327 1.0f,
7328 0,
7329 shape,
7330 output,
7331 1.0f,
7332 0);
7333}
7334
7335LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7336 armnn::IWorkloadFactory& workloadFactory,
7337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7338{
7339 const unsigned int shape0[] = { 1, 2, 2, 3 };
7340 const unsigned int shape1[] = { 1, 1, 1, 1 };
7341
7342 std::vector<int16_t> input0(
7343 {
7344 1, 2, 3, 4, 5, 6,
7345 7, 8, 9, 10, 11, 12
7346 });
7347
7348 std::vector<int16_t> input1({2});
7349
7350 std::vector<int16_t> output(
7351 {
7352 2, 4, 6, 8, 10, 12,
7353 14, 16, 18, 20, 22, 24
7354 });
7355
7356 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7357 memoryManager,
7358 shape0,
7359 input0,
7360 1.0f,
7361 0,
7362 shape1,
7363 input1,
7364 1.0f,
7365 0,
7366 shape0,
7367 output,
7368 1.0f,
7369 0);
7370}
7371
7372LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7373 armnn::IWorkloadFactory& workloadFactory,
7374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7375{
7376 const unsigned int shape0[] = { 1, 2, 2, 3 };
7377 const unsigned int shape1[] = { 1, 1, 1, 3 };
7378
7379 std::vector<int16_t> input0(
7380 {
7381 1, 2, 3, 4, 5, 6,
7382 7, 8, 9, 10, 11, 12
7383 });
7384
7385 std::vector<int16_t> input1({1, 2, 3});
7386
7387 std::vector<int16_t> output(
7388 {
7389 1, 4, 9, 4, 10, 18,
7390 7, 16, 27, 10, 22, 36
7391 });
7392
7393 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7394 memoryManager,
7395 shape0,
7396 input0,
7397 1.0f,
7398 0,
7399 shape1,
7400 input1,
7401 1.0f,
7402 0,
7403 shape0,
7404 output,
7405 1.0f,
7406 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007407}
telsoa014fcda012018-03-09 14:13:49 +00007408
David Beckf195f032018-09-06 16:46:34 +01007409namespace
7410{
Sadik Armagan2999a022019-04-09 14:20:12 +01007411template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007412LayerTestResult<T, 4> SubtractionTestHelper(
7413 armnn::IWorkloadFactory& workloadFactory,
7414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7415 const unsigned int shape0[4],
7416 const std::vector<T>& values0,
7417 float scale0,
7418 int32_t offset0,
7419 const unsigned int shape1[4],
7420 const std::vector<T> & values1,
7421 float scale1,
7422 int32_t offset1,
7423 const unsigned int outShape[4],
7424 const std::vector<T> & outValues,
7425 float outScale,
7426 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01007427{
Sadik Armagan2999a022019-04-09 14:20:12 +01007428 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7429 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7430 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01007431
7432 inputTensorInfo0.SetQuantizationScale(scale0);
7433 inputTensorInfo0.SetQuantizationOffset(offset0);
7434
7435 inputTensorInfo1.SetQuantizationScale(scale1);
7436 inputTensorInfo1.SetQuantizationOffset(offset1);
7437
7438 outputTensorInfo.SetQuantizationScale(outScale);
7439 outputTensorInfo.SetQuantizationOffset(outOffset);
7440
7441 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7442 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7443
7444 LayerTestResult<T, 4> result(outputTensorInfo);
7445 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7446
7447 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7448 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7449 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7450
7451 armnn::SubtractionQueueDescriptor data;
7452 armnn::WorkloadInfo info;
7453 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7454 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7455 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7456
7457 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
7458
7459 inputHandle0->Allocate();
7460 inputHandle1->Allocate();
7461 outputHandle->Allocate();
7462
7463 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7464 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7465
Derek Lambertif30f7d32019-04-09 10:25:02 +01007466 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01007467 workload->Execute();
7468
7469 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7470
7471 return result;
7472}
7473} // anonymous namespace
7474
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007475LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7476 armnn::IWorkloadFactory& workloadFactory,
7477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007478{
7479 const unsigned int shape0[] = { 1, 1, 2, 2 };
7480 const unsigned int shape1[] = { 1, 1, 2, 2 };
7481
7482 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7483 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7484 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7485
Sadik Armagan2999a022019-04-09 14:20:12 +01007486 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7487 memoryManager,
7488 shape0, input0, 0.5f, 2,
7489 shape1, input1, 1.0f, 0,
7490 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007491}
7492
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007493LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7494 armnn::IWorkloadFactory& workloadFactory,
7495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007496{
7497 const unsigned int shape0[] = { 1, 1, 2, 2 };
7498 const unsigned int shape1[] = { 1, 1, 1, 1 };
7499
7500 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7501 std::vector<uint8_t> input1({ 2 });
7502 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7503
Sadik Armagan2999a022019-04-09 14:20:12 +01007504 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7505 memoryManager,
7506 shape0, input0, 0.5f, 2,
7507 shape1, input1, 1.0f, 0,
7508 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007509}
7510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007511LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7512 armnn::IWorkloadFactory& workloadFactory,
7513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007514{
7515 const unsigned int shape0[] = { 1, 1, 2, 2 };
7516 const unsigned int shape1[] = { 1, 1, 2, 1 };
7517
7518 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7519 std::vector<uint8_t> input1({ 2, 1 });
7520 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7521
Sadik Armagan2999a022019-04-09 14:20:12 +01007522 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7523 memoryManager,
7524 shape0, input0, 1.0f, 0,
7525 shape1, input1, 1.0f, 0,
7526 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007527}
7528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007529LayerTestResult<float, 4> SubtractionTest(
7530 armnn::IWorkloadFactory& workloadFactory,
7531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007532{
7533 const unsigned int shape0[] = { 1, 1, 2, 2 };
7534 const unsigned int shape1[] = { 1, 1, 2, 2 };
7535
7536 std::vector<float> input0({ 1, 2, 3, 4 });
7537 std::vector<float> input1({ 1, -1, 0, 2 });
7538 std::vector<float> output({ 0, 3, 3, 2 });
7539
Sadik Armagan2999a022019-04-09 14:20:12 +01007540 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7541 memoryManager,
7542 shape0, input0, 1.0f, 0,
7543 shape1, input1, 1.0f, 0,
7544 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007545}
7546
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007547LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7548 armnn::IWorkloadFactory& workloadFactory,
7549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007550{
7551 const unsigned int shape0[] = { 1, 1, 2, 2 };
7552 const unsigned int shape1[] = { 1, 1, 1, 1 };
7553
7554 std::vector<float> input0({ 1, 2, 3, 4 });
7555 std::vector<float> input1({ 10 });
7556 std::vector<float> output({ -9, -8, -7, -6 });
7557
Sadik Armagan2999a022019-04-09 14:20:12 +01007558 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7559 memoryManager,
7560 shape0, input0, 1.0f, 0,
7561 shape1, input1, 1.0f, 0,
7562 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007563}
7564
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007565LayerTestResult<float, 4> SubtractionBroadcastTest(
7566 armnn::IWorkloadFactory& workloadFactory,
7567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007568{
7569 const unsigned int shape0[] = { 1, 1, 2, 2 };
7570 const unsigned int shape1[] = { 1, 1, 1, 2 };
7571
7572 std::vector<float> input0({ 1, 2, 3, 4 });
7573 std::vector<float> input1({ 10, -5 });
7574 std::vector<float> output({ -9, 7, -7, 9 });
7575
Sadik Armagan2999a022019-04-09 14:20:12 +01007576 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7577 memoryManager,
7578 shape0, input0, 1.0f, 0,
7579 shape1, input1, 1.0f, 0,
7580 shape0, output, 1.0f, 0);
7581}
7582
7583LayerTestResult<int16_t, 4> SubtractionInt16Test(
7584 armnn::IWorkloadFactory& workloadFactory,
7585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7586{
7587 const unsigned int shape0[] = { 1, 1, 2, 2 };
7588 const unsigned int shape1[] = { 1, 1, 2, 2 };
7589
7590 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7591 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7592 std::vector<int16_t> output({ 3, 3, 5, 5 });
7593
7594 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7595 memoryManager,
7596 shape0, input0, 0.5f, 0,
7597 shape1, input1, 1.0f, 0,
7598 shape0, output, 1.0f, 0);
7599}
7600
7601LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7602 armnn::IWorkloadFactory& workloadFactory,
7603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7604{
7605 const unsigned int shape0[] = { 1, 1, 2, 2 };
7606 const unsigned int shape1[] = { 1, 1, 1, 1 };
7607
7608 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7609 std::vector<int16_t> input1({ 2 });
7610 std::vector<int16_t> output({ 3, 4, 5, 6 });
7611
7612 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7613 memoryManager,
7614 shape0, input0, 0.5f, 0,
7615 shape1, input1, 1.0f, 0,
7616 shape0, output, 1.0f, 0);
7617}
7618
7619LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7620 armnn::IWorkloadFactory& workloadFactory,
7621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7622{
7623 const unsigned int shape0[] = { 1, 1, 2, 2 };
7624 const unsigned int shape1[] = { 1, 1, 2, 1 };
7625
7626 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7627 std::vector<int16_t> input1({ 2, 1 });
7628 std::vector<int16_t> output({ 8, 11, 12, 15 });
7629
7630 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7631 memoryManager,
7632 shape0, input0, 1.0f, 0,
7633 shape1, input1, 1.0f, 0,
7634 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007635}
7636
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // "Nop" case: the output dimensions equal the input dimensions, so the
    // resize-bilinear workload must reproduce the input exactly.
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output share the same quantization parameters, so raw uint8
    // values can be compared directly with no requantization error.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    // Identity resize: expect the input back unchanged.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Order matters: allocate the handles, copy the input data in, then
    // configure the workload before executing it.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7691
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Minification from a 2x2 input down to a 1x1 output.
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same quantization parameters on input and output.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate, upload input, then configure before executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7751
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Square minification: 4x4 input halved to a 2x2 output.
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same quantization parameters on input and output.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    // Expect every other input sample (top-left of each 2x2 group).
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate, upload input, then configure before executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7809
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Non-square minification: 3x2 input down to a 2x1 output.
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same quantization parameters on input and output; the trailing comments
    // on the data below give the dequantized (float) values: (q - (-1)) * 1.5.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate, upload input, then configure before executing.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7865
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Magnification along the width only: 2x3 input up to a 5x3 output.
    // Unlike the other resize tests, the input and output deliberately use
    // different quantization parameters, so requantization is exercised too.
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    // Trailing comments give the dequantized (float) values of each row.
    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate, upload input, then configure before executing.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7923
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007924LayerTestResult<float, 4> BatchNormTest(
7925 armnn::IWorkloadFactory& workloadFactory,
7926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007927{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007928 // BatchSize: 1
7929 // Channels: 2
7930 // Height: 3
7931 // Width: 2
7932
7933 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7934 std::vector<float> inputValues
7935 {
7936 // Batch 0, Channel 0, Height (3) x Width (2)
7937 1.f, 4.f,
7938 4.f, 2.f,
7939 1.f, 6.f,
7940
7941 // Batch 0, Channel 1, Height (3) x Width (2)
7942 1.f, 1.f,
7943 4.f, 1.f,
7944 -2.f, 4.f
7945 };
7946 std::vector<float> expectedOutputValues
7947 {
7948 // Batch 0, Channel 0, Height (3) x Width (2)
7949 1.f, 4.f,
7950 4.f, 2.f,
7951 1.f, 6.f,
7952
7953 // Batch 0, Channel 1, Height (3) x Width (2)
7954 3.f, 3.f,
7955 4.f, 3.f,
7956 2.f, 4.f
7957 };
7958
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007959 return BatchNormTestImpl<armnn::DataType::Float32>(
7960 workloadFactory, memoryManager,
7961 inputOutputShape, inputValues, expectedOutputValues,
7962 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007963}
7964
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007965LayerTestResult<float, 4> BatchNormNhwcTest(
7966 armnn::IWorkloadFactory& workloadFactory,
7967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007968{
7969 // BatchSize: 1
7970 // Height: 3
7971 // Width: 2
7972 // Channels: 2
7973
7974 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7975 std::vector<float> inputValues
7976 {
7977 // Batch 0, Height 0, Width (2) x Channel (2)
7978 1.f, 1.f,
7979 4.f, 1.f,
7980
7981 // Batch 0, Height 1, Width (2) x Channel (2)
7982 4.f, 4.f,
7983 2.f, 1.f,
7984
7985 // Batch 0, Height 2, Width (2) x Channel (2)
7986 1.f, -2.f,
7987 6.f, 4.f
7988 };
7989 std::vector<float> expectedOutputValues
7990 {
7991 // Batch 0, Height 0, Width (2) x Channel (2)
7992 1.f, 3.f,
7993 4.f, 3.f,
7994
7995 // Batch 0, Height 1, Width (2) x Channel (2)
7996 4.f, 4.f,
7997 2.f, 3.f,
7998
7999 // Batch 0, Height 2, Width (2) x Channel (2)
8000 1.f, 2.f,
8001 6.f, 4.f
8002 };
8003
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008004 return BatchNormTestImpl<armnn::DataType::Float32>(
8005 workloadFactory, memoryManager,
8006 inputOutputShape, inputValues, expectedOutputValues,
8007 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008008}
8009
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008010LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8011 armnn::IWorkloadFactory& workloadFactory,
8012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008013{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008014 // BatchSize: 1
8015 // Channels: 2
8016 // Height: 3
8017 // Width: 2
8018
8019 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8020 std::vector<float> inputValues
8021 {
8022 // Batch 0, Channel 0, Height (3) x Width (2)
8023 1.f, 4.f,
8024 4.f, 2.f,
8025 1.f, 6.f,
8026
8027 // Batch 0, Channel 1, Height (3) x Width (2)
8028 1.f, 1.f,
8029 4.f, 1.f,
8030 -2.f, 4.f
8031 };
8032 std::vector<float> expectedOutputValues
8033 {
8034 // Batch 0, Channel 0, Height (3) x Width (2)
8035 1.f, 4.f,
8036 4.f, 2.f,
8037 1.f, 6.f,
8038
8039 // Batch 0, Channel 1, Height (3) x Width (2)
8040 3.f, 3.f,
8041 4.f, 3.f,
8042 2.f, 4.f
8043 };
8044
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008045 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8046 workloadFactory, memoryManager,
8047 inputOutputShape, inputValues, expectedOutputValues,
8048 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008049}
8050
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008051LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8052 armnn::IWorkloadFactory& workloadFactory,
8053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008054{
8055 // BatchSize: 1
8056 // Height: 3
8057 // Width: 2
8058 // Channels: 2
8059
8060 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8061 std::vector<float> inputValues
8062 {
8063 // Batch 0, Height 0, Width (2) x Channel (2)
8064 1.f, 1.f,
8065 4.f, 1.f,
8066
8067 // Batch 0, Height 1, Width (2) x Channel (2)
8068 4.f, 4.f,
8069 2.f, 1.f,
8070
8071 // Batch 0, Height 2, Width (2) x Channel (2)
8072 1.f, -2.f,
8073 6.f, 4.f
8074 };
8075 std::vector<float> expectedOutputValues
8076 {
8077 // Batch 0, Height 0, Width (2) x Channel (2)
8078 1.f, 3.f,
8079 4.f, 3.f,
8080
8081 // Batch 0, Height 1, Width (2) x Channel (2)
8082 4.f, 4.f,
8083 2.f, 3.f,
8084
8085 // Batch 0, Height 2, Width (2) x Channel (2)
8086 1.f, 2.f,
8087 6.f, 4.f
8088 };
8089
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008090 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8091 (workloadFactory, memoryManager,
8092 inputOutputShape, inputValues, expectedOutputValues,
8093 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008094}
8095
Matteo Martincighf5507132019-06-04 10:59:47 +01008096LayerTestResult<int16_t, 4> BatchNormInt16Test(
8097 armnn::IWorkloadFactory& workloadFactory,
8098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8099{
8100 // BatchSize: 1
8101 // Channels: 2
8102 // Height: 3
8103 // Width: 2
8104
8105 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8106 std::vector<float> inputValues
8107 {
8108 // Batch 0, Channel 0, Height (3) x Width (2)
8109 1.f, 4.f,
8110 4.f, 2.f,
8111 1.f, 6.f,
8112
8113 // Batch 0, Channel 1, Height (3) x Width (2)
8114 1.f, 1.f,
8115 4.f, 1.f,
8116 -2.f, 4.f
8117 };
8118 std::vector<float> expectedOutputValues
8119 {
8120 // Batch 0, Channel 0, Height (3) x Width (2)
8121 1.f, 4.f,
8122 4.f, 2.f,
8123 1.f, 6.f,
8124
8125 // Batch 0, Channel 1, Height (3) x Width (2)
8126 3.f, 3.f,
8127 4.f, 3.f,
8128 2.f, 4.f
8129 };
8130
8131 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8132 workloadFactory, memoryManager,
8133 inputOutputShape, inputValues, expectedOutputValues,
8134 1.f/20.f, 50, armnn::DataLayout::NCHW);
8135}
8136
8137LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8138 armnn::IWorkloadFactory& workloadFactory,
8139 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8140{
8141 // BatchSize: 1
8142 // Height: 3
8143 // Width: 2
8144 // Channels: 2
8145
8146 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8147 std::vector<float> inputValues
8148 {
8149 // Batch 0, Height 0, Width (2) x Channel (2)
8150 1.f, 1.f,
8151 4.f, 1.f,
8152
8153 // Batch 0, Height 1, Width (2) x Channel (2)
8154 4.f, 4.f,
8155 2.f, 1.f,
8156
8157 // Batch 0, Height 2, Width (2) x Channel (2)
8158 1.f, -2.f,
8159 6.f, 4.f
8160 };
8161 std::vector<float> expectedOutputValues
8162 {
8163 // Batch 0, Height 0, Width (2) x Channel (2)
8164 1.f, 3.f,
8165 4.f, 3.f,
8166
8167 // Batch 0, Height 1, Width (2) x Channel (2)
8168 4.f, 4.f,
8169 2.f, 3.f,
8170
8171 // Batch 0, Height 2, Width (2) x Channel (2)
8172 1.f, 2.f,
8173 6.f, 4.f
8174 };
8175
8176 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8177 (workloadFactory, memoryManager,
8178 inputOutputShape, inputValues, expectedOutputValues,
8179 1.f/20.f, 50, armnn::DataLayout::NHWC);
8180}
8181
Nina Drozd58ef2c62019-05-16 12:09:18 +01008182LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008183 armnn::IWorkloadFactory& workloadFactory,
8184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008186 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008187}
8188
Nina Drozd58ef2c62019-05-16 12:09:18 +01008189LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8190 armnn::IWorkloadFactory& workloadFactory,
8191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8192{
8193 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8194}
8195
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008196LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8197 armnn::IWorkloadFactory& workloadFactory,
8198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008199{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008200 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008201}
8202
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008203LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8204 armnn::IWorkloadFactory& workloadFactory,
8205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008206{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008207 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008208}
8209
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008210LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8211 armnn::IWorkloadFactory& workloadFactory,
8212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008213{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008214 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008215}
8216
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008217LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8218 armnn::IWorkloadFactory& workloadFactory,
8219 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008220{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008221 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8222 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008223}
8224
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008225LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8226 armnn::IWorkloadFactory& workloadFactory,
8227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008228{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008229 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8230 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008231}
8232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008233LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8234 armnn::IWorkloadFactory& workloadFactory,
8235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008236{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008237 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008238}
8239
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008240LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8241 armnn::IWorkloadFactory& workloadFactory,
8242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008243{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008244 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008245}
8246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008247LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8248 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8250 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008251{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008252 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8253 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008254}
8255
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008256LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8257 armnn::IWorkloadFactory& workloadFactory,
8258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008259{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008260 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008261}
8262
// Uint8 3D concatenation where the inputs differ in size along the
// concatenation dimension (QuantisedAsymm8, scale 0.5, offset -1).
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

// useSubtensor is forwarded unchanged to the shared implementation.
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
8279
// Uint8 concatenation tests over 4D tensors, one wrapper per concatenation
// dimension, plus DiffShape variants where the inputs differ in shape.
// All instantiate the shared implementation for QuantisedAsymm8 with
// quantization scale 0.5 and offset -1.
LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// useSubtensor is forwarded unchanged to the shared implementation.
LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
    return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
8341
// Max pooling 2x2 kernel / 2x2 stride, per data type. forceNoPadding is
// forwarded to the shared implementation. The uint8 variant sets quantization
// scale 3.0 and offset -5; the int16 variant uses the implementation defaults.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8368
// Max pooling 3x3 kernel / 2x4 stride, per data type. The uint8 variant sets
// quantization scale 0.1 and offset 128; the int16 variant uses defaults.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8395
// Simple max pooling, parameterized on data layout (NCHW/NHWC), per data type.
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
// Max pooling with the "ignore padding" (exclude-padding) variants, per data
// type. The uint8 variants set quantization scale 1.0 and offset -5.
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8464
// Simple average pooling, parameterized on data layout, per data type.
// The uint8 variant sets quantization scale 0.5 and offset -1.
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, dataLayout, 0.5, -1);
}

LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, dataLayout);
}
8490
// Average pooling, 3x2 kernel / 2x2 stride, Float32 only.
// forceNoPadding is forwarded to the shared implementation.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8499
// Average pooling over large tensors, per data type.
// The uint8 variant sets quantization scale 0.5 and offset -1.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5, -1);
}

LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
// Simple average pooling with the "ignore padding" variant, per data type.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8544
// As above, but with the no-padding configuration, per data type.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8568
// Average pooling with a size-3 kernel and "ignore padding", per data type.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8591
// Simple L2 pooling, parameterized on data layout, per data type.
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
8615
// L2 pooling, size-3 kernel / stride 1, per data type.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8636
// L2 pooling, size-3 kernel / stride 3, per data type.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
// L2 pooling, size-3 kernel / stride 4, per data type.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8677
// L2 pooling, size-7 kernel, per data type.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8698
// L2 pooling, size-9 kernel, per data type.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
// Simple L2 pooling with the "ignore padding" variant, per data type.
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8739
// L2 pooling with a size-3 kernel and "ignore padding", per data type.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8760
// Pooling with asymmetric, non-square parameters, per data type.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8781
// Comparison tests: run the given pooling algorithm on workloadFactory and on
// refWorkloadFactory and compare the results. The uint8 variant sets
// quantization scale 0.1 and offset 128.
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
8811
// Fully-connected layer test over a large input, Float32.
// transposeWeights is forwarded unchanged to the shared implementation.
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
8819
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008820LayerTestResult<float, 4> SimplePermuteFloat32Test(
8821 armnn::IWorkloadFactory& workloadFactory,
8822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008823{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008824 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008825};
8826
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008827LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8828 armnn::IWorkloadFactory& workloadFactory,
8829 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008830{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008831 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008832};
surmeh01bceff2f2018-03-29 16:29:27 +01008833
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008834LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8835 armnn::IWorkloadFactory& workloadFactory,
8836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008837{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008838 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008839};
8840
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008841LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8842 armnn::IWorkloadFactory& workloadFactory,
8843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008844{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008845 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008846};
8847
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008848LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8849 armnn::IWorkloadFactory& workloadFactory,
8850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008851{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008852 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008853};
8854
namespace
{

// Builds and runs a single Mean workload on the given factory and returns a
// LayerTestResult holding both the actual and the expected output.
//
// T         - tensor element type; uint8_t selects QuantisedAsymm8, any other
//             type is treated as Float32 (see dataType below).
// InputDim  - rank of the input tensor.
// OutputDim - rank of the output tensor (smaller than InputDim when the
//             reduced axes are dropped, i.e. keepDims == false).
//
// inputShape / outputShape - extents arrays of InputDim / OutputDim entries.
// axis       - dimensions to reduce over; an empty list reduces over all.
// keepDims   - keep reduced dimensions as size-1 dimensions in the output.
// scale / offset - quantization parameters applied to BOTH tensors, so the
//                  quantized mean is computed within a single scale space.
// NOTE: memoryManager is currently unused by this helper.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Map the element type to the armnn data type; only uint8_t and float are
    // expected here.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean workload: which axes to reduce and whether reduced
    // dimensions are kept.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    // PostAllocationConfigure runs after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8915 armnn::IWorkloadFactory& workloadFactory,
8916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008917{
8918 const unsigned int inputShape[] = { 3, 2 };
8919 const unsigned int outputShape[] = { 1 };
8920
8921 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8922 std::vector<uint8_t> output({ 2 });
8923
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008924 return MeanTestHelper<uint8_t, 2, 1>(
8925 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008926}
8927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008928LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8929 armnn::IWorkloadFactory& workloadFactory,
8930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008931{
8932 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8933 const unsigned int outputShape[] = { 1, 1, 2 };
8934
8935 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8936 std::vector<uint8_t> output({ 2, 2 });
8937
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008938 return MeanTestHelper<uint8_t, 4, 3>(
8939 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008940}
8941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008942LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8943 armnn::IWorkloadFactory& workloadFactory,
8944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008945{
8946 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8947 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8948
8949 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8950 std::vector<uint8_t> output({ 2, 2 });
8951
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008952 return MeanTestHelper<uint8_t, 4, 4>(
8953 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008954}
8955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008956LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8957 armnn::IWorkloadFactory& workloadFactory,
8958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008959{
8960 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8961 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8962
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008963 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008964 std::vector<uint8_t> output({ 1, 3, 5 });
8965
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008966 return MeanTestHelper<uint8_t, 4, 4>(
8967 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008968}
8969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008970LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8971 armnn::IWorkloadFactory& workloadFactory,
8972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008973{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008974 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008975 const unsigned int outputShape[] = { 2 };
8976
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008977 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8978 24 });
8979 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008980
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008981 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8982 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008983 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008984}
8985
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008986LayerTestResult<float, 1> MeanFloatSimpleTest(
8987 armnn::IWorkloadFactory& workloadFactory,
8988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008989{
8990 const unsigned int inputShape[] = { 3, 2 };
8991 const unsigned int outputShape[] = { 1 };
8992
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008993 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8994 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008996 return MeanTestHelper<float, 2, 1>(
8997 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008998}
8999
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009000LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
9001 armnn::IWorkloadFactory& workloadFactory,
9002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01009003{
9004 const unsigned int inputShape[] = { 2, 3, 1, 2 };
9005 const unsigned int outputShape[] = { 3, 1, 2 };
9006
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009007 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
9008 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01009009
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009010 return MeanTestHelper<float, 4, 3>(
9011 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01009012}
9013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009014LayerTestResult<float, 4> MeanFloatKeepDimsTest(
9015 armnn::IWorkloadFactory& workloadFactory,
9016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01009017{
9018 const unsigned int inputShape[] = { 1, 1, 3, 2 };
9019 const unsigned int outputShape[] = { 1, 1, 1, 2 };
9020
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009021 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
9022 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01009023
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009024 return MeanTestHelper<float, 4, 4>(
9025 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01009026}
9027
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009028LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
9029 armnn::IWorkloadFactory& workloadFactory,
9030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01009031{
9032 const unsigned int inputShape[] = { 2, 3, 1, 2 };
9033 const unsigned int outputShape[] = { 1, 3, 1, 1 };
9034
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009035 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
9036 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01009037
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009038 return MeanTestHelper<float, 4, 4>(
9039 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01009040}
9041
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009042LayerTestResult<float, 1> MeanVtsFloat1Test(
9043 armnn::IWorkloadFactory& workloadFactory,
9044 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01009045{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009046 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01009047 const unsigned int outputShape[] = { 2 };
9048
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009049 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
9050 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
9051 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01009052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009053 return MeanTestHelper<float, 3, 1>(
9054 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01009055}
9056
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009057LayerTestResult<float, 3> MeanVtsFloat2Test(
9058 armnn::IWorkloadFactory& workloadFactory,
9059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01009060{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009061 const unsigned int inputShape[] = { 4, 3, 2 };
9062 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01009063
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009064 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
9065 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
9066 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01009067
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009068 return MeanTestHelper<float, 3, 3>(
9069 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009070}
9071
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009072LayerTestResult<float, 3> MeanVtsFloat3Test(
9073 armnn::IWorkloadFactory& workloadFactory,
9074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01009075{
9076 const unsigned int inputShape[] = { 1, 2, 2, 1 };
9077 const unsigned int outputShape[] = { 1, 2, 1 };
9078
9079 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
9080 std::vector<float> output({ 1.5f, 3.5f });
9081
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009082 return MeanTestHelper<float, 4, 3>(
9083 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01009084}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009085
// Chains two workloads: a 1x1/stride-2x2 MaxPool followed by an elementwise
// Addition that consumes the pooling output handle directly. Verifies that the
// final sum matches the expected values.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28,
                                                                });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    // Allocate all handles before any data transfer.
    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle BEFORE the pooling workload
    // has executed, so resultMaxPool holds unspecified data here; the
    // copy-back on the next line is then overwritten by workload->Execute()
    // below. The round-trip appears to be dead code — confirm and consider
    // removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute MaxPool first so its output handle holds valid data, then the
    // Addition which consumes that handle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009191LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9192 armnn::IWorkloadFactory& workloadFactory,
9193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009194{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009195 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009196}
9197
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009198LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9199 armnn::IWorkloadFactory& workloadFactory,
9200 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009201{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009202 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009203}
9204
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009205LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9206 armnn::IWorkloadFactory& workloadFactory,
9207 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009208{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009209 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009210}
9211
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009212LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9213 armnn::IWorkloadFactory& workloadFactory,
9214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009215{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009216 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009217}
9218
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009219LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9220 armnn::IWorkloadFactory& workloadFactory,
9221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009222{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009223 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009224}
9225
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009226LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9227 armnn::IWorkloadFactory& workloadFactory,
9228 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009229{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009230 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009231}
9232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009233LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9234 armnn::IWorkloadFactory& workloadFactory,
9235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009236{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009237 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009238}
9239
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009240LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9241 armnn::IWorkloadFactory& workloadFactory,
9242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009243{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009244 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009245}
9246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009247LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9248 armnn::IWorkloadFactory& workloadFactory,
9249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009250{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009251 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009252}
9253
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009254LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9255 armnn::IWorkloadFactory& workloadFactory,
9256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009257{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009258 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009259}
9260
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009261LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9262 armnn::IWorkloadFactory& workloadFactory,
9263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009264{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009265 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009266}
9267
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009268LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9269 armnn::IWorkloadFactory& workloadFactory,
9270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009271{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009272 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009273}
9274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009275LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9276 armnn::IWorkloadFactory& workloadFactory,
9277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009278{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009279 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009280}
9281
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009282LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9283 armnn::IWorkloadFactory& workloadFactory,
9284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009285{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009286 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009287}
9288
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009289LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9290 armnn::IWorkloadFactory& workloadFactory,
9291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009292{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009293 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009294}
9295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009296LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9297 armnn::IWorkloadFactory& workloadFactory,
9298 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009299{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009300 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009301}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009302
nikraj01120522a2019-05-31 11:33:07 +01009303LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9304 armnn::IWorkloadFactory& workloadFactory,
9305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9306{
9307 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9308}
9309
9310LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9311 armnn::IWorkloadFactory& workloadFactory,
9312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9313{
9314 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9315}
9316
9317LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9318 armnn::IWorkloadFactory& workloadFactory,
9319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9320{
9321 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9322}
9323
9324LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9325 armnn::IWorkloadFactory& workloadFactory,
9326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9327{
9328 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9329}
9330
9331LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9332 armnn::IWorkloadFactory& workloadFactory,
9333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9334{
9335 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9336}
9337
9338LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9339 armnn::IWorkloadFactory& workloadFactory,
9340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9341{
9342 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9343}
9344
9345LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9346 armnn::IWorkloadFactory& workloadFactory,
9347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9348{
9349 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9350}
9351
9352LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9353 armnn::IWorkloadFactory& workloadFactory,
9354 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9355{
9356 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9357}
9358
namespace {

// Builds and runs a single BatchToSpaceNd workload and returns a
// LayerTestResult holding both the actual and the expected output.
//
// T          - tensor element type; uint8_t selects QuantisedAsymm8, any other
//              type is treated as Float32 (see dataType below).
// InputDim   - rank of the input tensor.
// OutputDim  - rank of the output tensor. NOTE(review): the final
//              CopyDataFromITensorHandle indexes [0][0][0][0], which only
//              compiles/behaves for a rank-4 output — confirm callers always
//              use OutputDim == 4.
//
// dataLayout - NCHW/NHWC layout passed through to the descriptor.
// blockShape - per-spatial-dimension block sizes.
// crops      - (begin, end) crop amounts per spatial dimension.
// scale / offset - quantization parameters applied to both tensors.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Map the element type to the armnn data type; only uint8_t and float are
    // expected here.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization parameters on input and output.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the BatchToSpaceNd workload.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    // PostAllocationConfigure runs after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
9418
// BatchToSpaceNd, NHWC float: four 2x2x1 batches recombine into a single
// 4x4x1 image using a 2x2 block shape and no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // The batch dimension folds back into the spatial dimensions,
    // restoring the values to plain row-major order.
    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9464
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009465LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
9466 armnn::IWorkloadFactory& workloadFactory,
9467 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009468{
9469 const unsigned int inputShape[] = {4, 1, 1, 1};
9470 const unsigned int outputShape[] = {1, 2, 2, 1};
9471
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009472 std::vector<float> input({
9473 // Batch 0, Height 0, Width (2) x Channel (1)
9474 1.0f, 2.0f, 3.0f, 4.0f
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009475 });
9476
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009477 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009478
9479 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009480 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009482 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9483 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9484 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009485}
9486
// BatchToSpaceNd, NHWC float, multi-channel: four 1x1x3 batches map onto a
// single 2x2x3 output (2x2 block shape, no cropping).
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    // Channel triples stay together; the batches fill the 2x2 spatial grid,
    // so the flattened data is unchanged.
    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9505
// BatchToSpaceNd, NHWC float, with cropping: eight 1x3x1 batches expand to
// two batches of 2x6 via a 2x2 block shape; cropping 2 elements from the
// start of the width dimension removes the leading zeros and yields 2x2x4x1.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    // The leading 0.0f of each row lands in the cropped region of the output.
    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    // Crop {2, 0} on the width dimension: drop 2 elements at the start.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9538
// BatchToSpaceNd, NCHW float: four 3-channel 1x1 batches recombine into one
// batch with a 2x2 spatial grid per channel (2x2 block shape, no cropping).
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batch-major layout: three consecutive values per batch, one per channel.
    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009569
// BatchToSpaceNd, NCHW float: minimal case, four single-element batches fold
// into one 2x2 spatial grid (2x2 block shape, no cropping).
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One value per input batch (each batch is 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9591
// BatchToSpaceNd, NCHW float: same shapes as Test1 but with the input values
// ordered so the batch interleaving is visible in the expected output.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batch-major layout: three consecutive values per batch, one per channel.
    std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 7.0f,
        2.0f, 8.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3.0f, 9.0f,
        4.0f, 10.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5.0f, 11.0f,
        6.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009622
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009623LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9624 armnn::IWorkloadFactory& workloadFactory,
9625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009626{
9627 const unsigned int inputShape[] = {4, 2, 2, 1};
9628 const unsigned int outputShape[] = {1, 4, 4, 1};
9629
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009630 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9631 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009632
9633 std::vector<unsigned int> blockShape({2, 2});
9634 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9635
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009636 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9637 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009638}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009639
// BatchToSpaceNd, NHWC uint8: minimal case, four single-element batches fold
// into one 2x2 spatial grid (2x2 block shape, no cropping).
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<uint8_t> input({
        // One value per input batch (each batch is 1x1x1).
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9661
// BatchToSpaceNd, NHWC uint8, multi-channel: four 1x1x3 batches map onto a
// single 2x2x3 output (2x2 block shape, no cropping).
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});

    // Channel triples stay together; batches fill the 2x2 spatial grid, so
    // the flattened data is unchanged.
    std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9680

// BatchToSpaceNd, NCHW uint8: four 3-channel 1x1 batches recombine into one
// batch with a 2x2 spatial grid per channel (2x2 block shape, no cropping).
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batch-major layout: three consecutive values per batch, one per channel.
    std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});

    std::vector<uint8_t> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 4,
        7, 10,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2, 5,
        8, 11,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3, 6,
        9, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9712
// BatchToSpaceNd, NCHW uint8: minimal case, four single-element batches fold
// into one 2x2 spatial grid (2x2 block shape, no cropping).
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<uint8_t> input({
        // One value per input batch (each batch is 1x1x1).
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9734
9735LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9736 armnn::IWorkloadFactory& workloadFactory,
9737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9738{
9739 const unsigned int inputShape[] = {4, 3, 1, 1};
9740 const unsigned int outputShape[] = {1, 3, 2, 2};
9741
9742 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9743
9744 std::vector<uint8_t> expectedOutput({
9745 // Batch 0, Channel 0, Height (2) x Width (2)
9746 1, 7,
9747 2, 8,
9748
9749 // Batch 0, Channel 1, Height (2) x Width (2)
9750 3, 9,
9751 4, 10,
9752
9753 // Batch 0, Channel 2, Height (2) x Width (2)
9754 5, 11,
9755 6, 12,
9756 });
9757
9758 std::vector<unsigned int> blockShape({2, 2});
9759 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9760
9761 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9762 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9763 crops, outputShape, expectedOutput);
9764}
9765
// BatchToSpaceNd, NCHW uint8, with cropping: eight 1x1x3 batches expand to
// two batches of 1x2x6 via a 2x2 block shape; cropping 2 elements from the
// start of the width dimension removes the zeros and yields 2x1x2x4.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 1, 3};
    const unsigned int outputShape[] = {2, 1, 2, 4};

    // The leading zeros land in the cropped region of the output.
    std::vector<uint8_t> input({
        0, 1, 3, 0, 9, 11,
        0, 2, 4, 0, 10, 12,
        0, 5, 7, 0, 13, 15,
        0, 6, 8, 0, 14, 16
    });

    std::vector<uint8_t> expectedOutput({
        1, 2, 3, 4,
        5, 6, 7, 8,
        9, 10, 11, 12,
        13, 14, 15, 16
    });

    std::vector<unsigned int> blockShape({2, 2});
    // Crop {2, 0} on the width dimension: drop 2 elements at the start.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9794
//
// StridedSlice: Float32 variants. Each function simply instantiates the
// shared templated StridedSlice test implementation for the Float32 type.
//

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9857
//
// StridedSlice: QuantisedAsymm8 (uint8) variants. Each function simply
// instantiates the shared templated StridedSlice test implementation for
// the QuantisedAsymm8 data type.
//

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009920
//
// StridedSlice: QuantisedSymm16 (int16) variants. Each function simply
// instantiates the shared templated StridedSlice test implementation for
// the QuantisedSymm16 data type.
//

LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9983
//
// Debug layer: Float32 and QuantisedAsymm8 variants over 1D-4D tensors.
// Each function simply instantiates the shared templated Debug test
// implementation for the corresponding data type and rank.
//

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +000010039
narpra014951d842019-01-18 16:53:53 +000010040LayerTestResult<float, 1> Gather1DParamsFloatTest(
10041 armnn::IWorkloadFactory& workloadFactory,
10042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10043{
10044 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10045}
10046
10047LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
10048 armnn::IWorkloadFactory& workloadFactory,
10049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10050{
10051 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10052}
10053
10054LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
10055 armnn::IWorkloadFactory& workloadFactory,
10056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10057{
10058 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10059}
10060
10061LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
10062 armnn::IWorkloadFactory& workloadFactory,
10063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10064{
10065 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10066}
10067
10068LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
10069 armnn::IWorkloadFactory& workloadFactory,
10070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10071{
10072 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10073}
10074
10075LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
10076 armnn::IWorkloadFactory& workloadFactory,
10077 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10078{
10079 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
10080 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +000010081}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010082
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010083LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010084 armnn::IWorkloadFactory& workloadFactory,
10085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10086{
10087 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10088}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010089
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010090LayerTestResult<float, 4> DequantizeOffsetUint8Test(
10091 armnn::IWorkloadFactory& workloadFactory,
10092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10093{
10094 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10095}
10096
10097LayerTestResult<float, 4> DequantizeSimpleInt16Test(
10098 armnn::IWorkloadFactory& workloadFactory,
10099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10100{
10101 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10102}
10103
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010104LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10105 armnn::IWorkloadFactory& workloadFactory,
10106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10107{
10108 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10109}
10110
10111LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10112 armnn::IWorkloadFactory& workloadFactory,
10113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10114{
10115 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10116}
10117
10118LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10119 armnn::IWorkloadFactory& workloadFactory,
10120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10121{
10122 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10123}