blob: 7f0c933f2b22e89f5a1f79657c188a451271bdc1 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000036#include "SplitterTestImpl.hpp"
37#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000038#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000039#include "NormTestImpl.hpp"
40#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000047
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Channel 0: all 0.5f except the second row, which is all zeros.
// Channel 1: a single vertical line of ones in column 2, zeros elsewhere.
// Channel 2: uniformly -1.
static std::vector<float> ConvInput3x8x16 = []()
{
    std::vector<float> image;
    image.reserve(3u * 8u * 16u);
    for (unsigned int row = 0; row < 8; ++row)
    {
        const float value = (row == 1) ? 0.0f : 0.5f;
        for (unsigned int col = 0; col < 16; ++col)
        {
            image.push_back(value);
        }
    }
    for (unsigned int row = 0; row < 8; ++row)
    {
        for (unsigned int col = 0; col < 16; ++col)
        {
            image.push_back(col == 2 ? 1.0f : 0.0f);
        }
    }
    image.insert(image.end(), 8 * 16, -1.0f);
    return image;
}();
75
// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2{0.0f, 2.0f};
78
telsoa01c577f2c2018-08-31 09:22:23 +010079// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000080template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010081boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000082{
83 if(biasEnabled)
84 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000085 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010086 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000087 return bias;
88 }
89 else
90 {
91 return boost::multi_array<T, 1>();
92 }
93}
94
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000095template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000096LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
97 armnn::IWorkloadFactory& workloadFactory,
98 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
99 float qScale,
100 int32_t qOffset,
101 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000102 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000103{
telsoa01c577f2c2018-08-31 09:22:23 +0100104 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000105 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000106 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
107
telsoa01c577f2c2018-08-31 09:22:23 +0100108 // Use a 2-element batch with 3-channel 3x5 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000109 armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000110 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
111 QuantizedVector<T>(qScale, qOffset, {
112 1, 1, 1,
113 1, -1, 1,
114 1, 1, 1,
115 1, 1, 1,
116 1, 1, 1,
117
118 0, 0, 0,
119 0, 0, 0,
120 0, 0, 0,
121 0, 0, 0,
122 0, 0, 0,
123
124 2, 2, 2,
125 2, 2, 2,
126 2, 2, 2,
127 2, 2, 2,
128 2, 2, 2,
129
130
131 0, 0, 0,
132 0, 0, 0,
133 0, 0, 0,
134 0, 0, 0,
135 0, 0, 0,
136
137 1, 1, 1,
138 1, 1, 1,
139 1, 1, 1,
140 1, 1, 1,
141 1, 1, 1,
142
143 0, 0, 0,
144 0, 0, 0,
145 0, 0, 0,
146 0, 0, 0,
147 0, 0, 0
148 })));
149
telsoa01c577f2c2018-08-31 09:22:23 +0100150 // Expected output is 2 batch elements of a 1-channel 14x4 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000151 armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000152 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
153 QuantizedVector<T>(qScale, qOffset, {
154 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
155 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
156 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
157 -23.5f, -23.5f, -23.5f,
158 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
159 -23.5f, -23.5f, -23.5f,
160
161 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
162 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
163 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
164 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
165 })));
166
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000167 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
168 workloadFactory,
169 memoryManager,
170 input,
171 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100172 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000173 expectedOutput,
174 qScale,
175 qOffset,
176 layout);
telsoa014fcda012018-03-09 14:13:49 +0000177}
178
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000179template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
180 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000181LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
182 armnn::IWorkloadFactory& workloadFactory,
183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
184 float qScale,
185 int32_t qOffset,
186 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000187 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000188{
telsoa01c577f2c2018-08-31 09:22:23 +0100189 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000190
telsoa01c577f2c2018-08-31 09:22:23 +0100191 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000192 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000193 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
194
telsoa01c577f2c2018-08-31 09:22:23 +0100195 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000196 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000197 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
198 QuantizedVector<T>(qScale, qOffset, {
199 1, 1, 1,
200 1, -1, 1,
201 1, 1, 1,
202
203 0, 0, 0,
204 0, 0, 0,
205 0, 0, 0,
206
207 2, 2, 2,
208 2, 2, 2,
209 2, 2, 2,
210
211
212 0, 0, 0,
213 0, 0, 0,
214 0, 0, 0,
215
216 1, 1, 1,
217 1, 1, 1,
218 1, 1, 1,
219
220 0, 0, 0,
221 0, 0, 0,
222 0, 0, 0
223 })));
224
telsoa01c577f2c2018-08-31 09:22:23 +0100225 // Expected output is 1 batch of a 2-channel 14x6 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000226 armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000227 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
228 QuantizedVector<T>(qScale, qOffset, {
229 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
230 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
231 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
232 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
233 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
234 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
235
236 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
237 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
238 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
239 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
241 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
242 })));
243
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000244 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
245 workloadFactory,
246 memoryManager,
247 input,
248 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100249 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000250 expectedOutput,
251 qScale,
252 qOffset,
253 layout);
telsoa014fcda012018-03-09 14:13:49 +0000254}
255
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000256template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000257LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
258 armnn::IWorkloadFactory& workloadFactory,
259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
260 float qScale,
261 int32_t qOffset,
262 bool biasEnabled,
263 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100264{
265 // Use common single-batch 5x5 image.
266
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000267 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100268 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
269 {
270 1, 5, 2, 3,
271 8, 7, 3, 6,
272 3, 3, 9, 1
273 });
274
275
276 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000277 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100278 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
279 4, 5, 6,
280 0, 0, 0,
281 3, 2, 1
282 });
283
284 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000285 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100286
287 const std::vector<float> outputData =
288 {
289 23, 41, 33, 21,
290 44, 65, 76, 52,
291 82, 85, 79, 42
292 };
293
294 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
295
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000296 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
297 workloadFactory,
298 memoryManager,
299 input,
300 kernel,
301 boost::multi_array<T, 1>(),
302 expectedOutput,
303 dataLayout,
304 qScale,
305 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100306}
307
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000308template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000309LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
310 armnn::IWorkloadFactory& workloadFactory,
311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
312 float qScale,
313 int32_t qOffset,
314 bool biasEnabled,
315 const armnn::DataLayout& dataLayout)
316{
317 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000318 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000319 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
320 {
321 1, 5, 2, 3, 5,
322 8, 7, 3, 6, 3,
323 3, 3, 9, 1, 9,
324 4, 1, 8, 1, 3,
325 6, 8, 1, 9, 2
326 });
327
328 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000329 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000330 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
331 {
332 4, 5, 6,
333 0, 0, 0,
334 3, 2, 1
335 });
336
337 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000338 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000339
340 const std::vector<T> outputData =
341 {
342 23, 33, 24,
343 91, 99, 48,
344 26, 50, 19
345 };
346
347 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
348
349 uint32_t padLeft = 1;
350 uint32_t padTop = 1;
351 uint32_t padRight = 1;
352 uint32_t padBottom = 1;
353 uint32_t strideX = 2;
354 uint32_t strideY = 2;
355
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000356 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
357 workloadFactory,
358 memoryManager,
359 input,
360 kernel,
361 boost::multi_array<T, 1>(),
362 expectedOutput,
363 dataLayout,
364 qScale,
365 qOffset,
366 padLeft,
367 padTop,
368 padRight,
369 padBottom,
370 strideX,
371 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000372}
373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000374LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
375 armnn::IWorkloadFactory& workloadFactory,
376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
377 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000378 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000380 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
381 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000382}
383
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000384LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
385 armnn::IWorkloadFactory& workloadFactory,
386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
387 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000388 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000389{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000390 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
391 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000392}
393
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000394LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
395 armnn::IWorkloadFactory& workloadFactory,
396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
397 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000398 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000399{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000400 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
401 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000402}
403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000404LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
405 armnn::IWorkloadFactory& workloadFactory,
406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
407 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100408{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000409 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
410 workloadFactory,
411 memoryManager,
412 0.f,
413 0,
414 biasEnabled,
415 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100416}
417
Mike Kelly7332ed82018-12-20 17:03:06 +0000418LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
419 armnn::IWorkloadFactory& workloadFactory,
420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
421 bool biasEnabled,
422 const armnn::DataLayout layout)
423{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000424 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
425 workloadFactory,
426 memoryManager,
427 0.f,
428 0,
429 biasEnabled,
430 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000431}
432
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000433LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
434 armnn::IWorkloadFactory& workloadFactory,
435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
436 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000437 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000438{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000439 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
440 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000441}
442
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100443LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
444 armnn::IWorkloadFactory& workloadFactory,
445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
446 bool biasEnabled,
447 const armnn::DataLayout layout)
448{
449return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
450 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
451}
452
453LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
454 armnn::IWorkloadFactory& workloadFactory,
455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
456 bool biasEnabled,
457 const armnn::DataLayout layout)
458{
459 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
460 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
461}
462
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000463template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
464 typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +0000465LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
466 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000467 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000468 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000469 float qScale,
470 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000471{
telsoa01c577f2c2018-08-31 09:22:23 +0100472 // Use a single-batch 1-channel 3x3 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000473 armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000474 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
475 QuantizedVector<T>(qScale, qOffset, {
476 11,21,31,
477 12,22,32,
478 13,23,33
479 })));
480
telsoa01c577f2c2018-08-31 09:22:23 +0100481 // Use 1 batch of a 1-channel 2x2 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000482 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000483 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
484 QuantizedVector<T>(qScale, qOffset, {
485 -11,-21,
486 -12,-22,
487 })));
488
telsoa01c577f2c2018-08-31 09:22:23 +0100489// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000490// Manually calculated like this:
491//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
492//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
493//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
494//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
495//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
496//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
497//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000498 armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000499 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
500 QuantizedVector<T>(qScale, qOffset, {
501 0, 0, 0, 0, 0, 0,
502 -242, -594, -934, -372, 0, 0,
503 -495, -1190, -1850, -725, 0, 0,
504 -538, -1256, -1916, -748, 0, 0,
505 -273, -626, -946, -363, 0, 0,
506 0, 0, 0, 0, 0, 0,
507 0, 0, 0, 0, 0, 0,
508 0, 0, 0, 0, 0, 0
509 })));
510
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000511 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
512 workloadFactory,
513 memoryManager,
514 input,
515 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100516 GetBias2<ArmnnBType>(false, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000517 expectedOutput,
518 qScale,
519 qOffset,
520 layout,
521 1, // Padding left.
522 2, // Padding top.
523 3, // Padding right.
524 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000525}
526
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000527template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
528 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000529LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
530 armnn::IWorkloadFactory& workloadFactory,
531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000532 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000533 float qScale,
534 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000535{
telsoa01c577f2c2018-08-31 09:22:23 +0100536 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000537 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000538 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
539 QuantizedVector<T>(qScale, qOffset, {
540 11,21,31,41,51,
541 12,22,32,42,52,
542 13,23,33,43,53,
543 14,24,34,44,54,
544 15,25,35,45,55,
545 })));
546
telsoa01c577f2c2018-08-31 09:22:23 +0100547 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000548 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000549 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
550 QuantizedVector<T>(qScale, qOffset, {
551 -11,-21,-31,-41,
552 -12,-22,-32,-42,
553 -13,-23,-33,-43,
554 -14,-24,-34,-44,
555 })));
556
telsoa01c577f2c2018-08-31 09:22:23 +0100557 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000559 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
560 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
561 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000562 -7140, -10580, -13940, -9300, -5230,
563 -9590, -14120, -18520, -12290, -6860,
564 -9980, -14560, -18960, -12560, -7000,
565 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100566 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000567 })));
568
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000569 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
570 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000571 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000572 input,
573 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100574 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000575 expectedOutput,
576 qScale,
577 qOffset,
narpra015f703182018-10-26 16:24:58 +0100578 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100579 1, // Padding left.
580 1, // Padding top.
581 2, // Padding right.
582 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100583}
584
Teresa Charlinedeeb162019-06-14 11:09:19 +0100585LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
586 armnn::IWorkloadFactory& workloadFactory,
587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
588 armnn::DataLayout layout)
589{
590 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
591 workloadFactory, memoryManager, layout, 0.0f, 0);
592}
593
594LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
595 armnn::IWorkloadFactory& workloadFactory,
596 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
597 armnn::DataLayout layout)
598{
599 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
600 <armnn::DataType::Float32, armnn::DataType::Float32>(
601 workloadFactory, memoryManager, layout, 0.0f, 0);
602}
603
604LayerTestResult<float, 4> Convolution1dTest(
605 armnn::IWorkloadFactory& workloadFactory,
606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
607 bool biasEnabled)
608{
609 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
610 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
611}
612
613LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
614 armnn::IWorkloadFactory& workloadFactory,
615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
616 bool biasEnabled)
617{
618 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
619 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
620}
621
622LayerTestResult<float,4> CompareConvolution2dTest(
623 armnn::IWorkloadFactory& workloadFactory,
624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
625 armnn::IWorkloadFactory& refWorkloadFactory)
626{
627 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
628 workloadFactory, memoryManager, refWorkloadFactory);
629}
630
631template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
632LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
633 armnn::IWorkloadFactory& workloadFactory,
634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
635 const std::vector<float>& inputNoQuantizedValues,
636 armnn::TensorInfo& inputTensorInfo,
637 const std::vector<float>& kernelNoQuantizedValues,
638 armnn::TensorInfo& kernelTensorInfo,
639 const std::vector<float>& outputExpectedNoQuantizedValues,
640 armnn::TensorInfo& outputTensorInfo,
641 uint32_t dilationX,
642 uint32_t dilationY,
643 armnn::DataLayout layout = armnn::DataLayout::NCHW,
644 bool biasEnabled = false
645)
646{
647 float qScale;
648 int32_t qOffset;
649 switch (ArmnnType)
650 {
651 case armnn::DataType::QuantisedAsymm8:
652 {
653 qScale = 0.1f;
654 qOffset = 128;
655 break;
656 }
657 case armnn::DataType::QuantisedSymm16:
658 {
659 qScale = 0.1f;
660 qOffset = 0;
661 break;
662 }
663 case armnn::DataType::Float32:
664 default:
665 {
666 qScale = 0.f;
667 qOffset = 0;
668 break;
669 }
670 }
671
672 inputTensorInfo.SetQuantizationScale(qScale);
673 inputTensorInfo.SetQuantizationOffset(qOffset);
674 kernelTensorInfo.SetQuantizationScale(qScale);
675 kernelTensorInfo.SetQuantizationOffset(qOffset);
676 outputTensorInfo.SetQuantizationScale(qScale);
677 outputTensorInfo.SetQuantizationOffset(qOffset);
678
679 auto input = MakeTensor<T, 4>(inputTensorInfo,
680 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
681 inputTensorInfo.GetQuantizationOffset(),
682 inputNoQuantizedValues)));
683 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
684 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
685 kernelTensorInfo.GetQuantizationOffset(),
686 kernelNoQuantizedValues)));
687 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
688 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
689 outputTensorInfo.GetQuantizationOffset(),
690 outputExpectedNoQuantizedValues)));
691
692 uint32_t padLeft = 0;
693 uint32_t padTop = 0;
694 uint32_t padRight = 0;
695 uint32_t padBottom = 0;
696 uint32_t strideX = 1;
697 uint32_t strideY = 1;
698
699 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
700 workloadFactory,
701 memoryManager,
702 input,
703 kernel,
704 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
705 expectedOutput,
706 qScale,
707 qOffset,
708 layout,
709 padLeft,
710 padTop,
711 padRight,
712 padBottom,
713 strideX,
714 strideY,
715 dilationX,
716 dilationY);
717}
718
// Single-channel dilated-convolution test: a 10x10 input holding a 3x3 block
// of ones is convolved with one 3x3 kernel at dilation 3x3 (no padding,
// stride 1). Shapes are given in NCHW; 'layout' selects the layout actually
// exercised by the common helper.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, single-channel 10x10 input: all zeros except a 3x3 block
    // of ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 kernel with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I-K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    // Delegate to the shared implementation with dilationX = dilationY = 3.
    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,
        3,
        layout,
        biasEnabled);
}
774
// Two-input-channel variant of the dilated-convolution test: both channels
// carry the same 10x10 pattern and the { 1, 2, 3, 3 } kernel sums them into a
// single output channel, so every expected value is double that of the
// single-channel test.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, 2-channel 10x10 input; each channel holds the same 3x3
    // block of ones surrounded by zeros.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel reading two input channels; both 3x3 slices carry
    // weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I-K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    // Delegate to the shared implementation with dilationX = dilationY = 3.
    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,
        3,
        layout,
        biasEnabled);
}
845
// Explicit instantiations of the dilation tests for the data-type combinations
// exercised by the backend unit tests: Float32 activations with Float32 bias,
// and 8/16-bit quantized activations with Signed32 bias.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
887
// Shared fixture for the depthwise-convolution tests with asymmetric padding
// (1 on the left/top, 2 on the right/bottom) at stride 1x1, NCHW shapes.
// qScale/qOffset parameterise quantization for the integer data types.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input; values 0..24 in the
    // first channel, 25..49 in the second.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel; weights count
    // down from 32 so the two channel slices differ.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias (when enabled) is scaled by qScale^2, the product of the input and
    // weight scales.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
964
// NHWC variant of the asymmetric-padding depthwise fixture: the same pixel and
// expected values as DepthwiseConvolution2dAsymmetricTestCommon, but input and
// output are stored channel-interleaved ({ 1, 5, 5, 2 }). Note the kernel
// tensor keeps its { 1, 2, 4, 4 } shape.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Single-batch 5x5 2-channel input, channel-last: each pair is
    // {channel0, channel1} for one pixel (0..24 and 25..49 respectively).
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // 2-channel 4x4 kernel, weights counting down from 32.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected 5x5 2-channel output, channel-interleaved to match the input.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    // Bias (when enabled) is scaled by qScale^2, the product of the input and
    // weight scales.
    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1072
Bruno Goncalves22972f02019-04-26 21:03:24 -03001073template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1074 typename T = armnn::ResolveType<ArmnnType>>
1075LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1076 armnn::IWorkloadFactory& workloadFactory,
1077 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1078 float qScale,
1079 int32_t qOffset,
1080 bool biasEnabled)
1081{
1082 armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
1083 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
1084 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
1085 0, 0, 0, 0, 0, 0, 0, 0, 0,
1086 0, 0, 0, 0, 0, 0, 0, 0, 0,
1087 0, 0, 0, 0, 0, 0, 0, 0, 0,
1088 0, 0, 0, 1, 1, 1, 0, 0, 0,
1089 0, 0, 0, 1, 1, 1, 0, 0, 0,
1090 0, 0, 0, 1, 1, 1, 0, 0, 0,
1091 0, 0, 0, 0, 0, 0, 0, 0, 0,
1092 0, 0, 0, 0, 0, 0, 0, 0, 0,
1093 0, 0, 0, 0, 0, 0, 0, 0, 0
1094 })));
1095
1096 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1097 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
1098 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
1099 1, 2, 3,
1100 4, 5, 6,
1101 7, 8, 9
1102 })));
1103
1104 uint32_t padLeft = 0;
1105 uint32_t padTop = 0;
1106 uint32_t padRight = 0;
1107 uint32_t padBottom = 0;
1108 uint32_t strideX = 1;
1109 uint32_t strideY = 1;
1110 uint32_t dilationX = 3;
1111 uint32_t dilationY = 3;
1112
1113 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
1114 armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
1115 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
1116 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1117 5, 5, 5,
1118 5, 5, 5,
1119 5, 5, 5
1120 })));
1121
1122 return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
1123 workloadFactory,
1124 memoryManager,
1125 input,
1126 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001127 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001128 expectedOutput,
1129 qScale,
1130 qOffset,
1131 padLeft,
1132 padTop,
1133 padRight,
1134 padBottom,
1135 strideX,
1136 strideY,
1137 dilationX,
1138 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001139}
1140
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001141LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1142 armnn::IWorkloadFactory& workloadFactory,
1143 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1144 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001145 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001146{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001147 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001148 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001149}
1150
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001151LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1152 armnn::IWorkloadFactory& workloadFactory,
1153 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1154 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001155{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001156 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1157 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001158}
1159
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001160LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1161 armnn::IWorkloadFactory& workloadFactory,
1162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1163 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001164 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001165{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001166 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001167 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001168}
1169
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001170LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1171 armnn::IWorkloadFactory& workloadFactory,
1172 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1173 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001174 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001175{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001176 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001177 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001178}
1179
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001180LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1181 armnn::IWorkloadFactory& workloadFactory,
1182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1183 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001184 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001186 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001187 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001188}
1189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001190LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1191 armnn::IWorkloadFactory& workloadFactory,
1192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1193 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001194 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001195{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001196 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001197 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001198}
1199
Bruno Goncalves22972f02019-04-26 21:03:24 -03001200LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1201 armnn::IWorkloadFactory& workloadFactory,
1202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1203{
1204 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1205 workloadFactory,
1206 memoryManager,
1207 0.f,
1208 0,
1209 false);
1210}
1211
Ruomei Yan88d44b82019-05-23 14:29:06 +01001212LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1213 armnn::IWorkloadFactory& workloadFactory,
1214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1215 bool biasEnabled,
1216 const armnn::DataLayout layout)
1217{
1218 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1219 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1220}
1221
1222LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1223 armnn::IWorkloadFactory& workloadFactory,
1224 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1225 bool biasEnabled,
1226 const armnn::DataLayout layout)
1227{
1228 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1229 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1230}
1231
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001232LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001233 armnn::IWorkloadFactory& workloadFactory,
1234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1235 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001236 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001237{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001238 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1239 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001240}
1241
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001242LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1243 armnn::IWorkloadFactory& workloadFactory,
1244 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1245 armnn::IWorkloadFactory& refWorkloadFactory,
1246 const armnn::DataLayout layout)
1247{
1248 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1249 workloadFactory, memoryManager, refWorkloadFactory, layout);
1250}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001251
1252LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1253 armnn::IWorkloadFactory& workloadFactory,
1254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001255{
1256 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1257 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001258 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001259}
1260
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001261LayerTestResult<float,4> SimpleNormalizationWithinTest(
1262 armnn::IWorkloadFactory& workloadFactory,
1263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001264{
1265 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1266 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001267 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001268}
1269
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001270LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1271 armnn::IWorkloadFactory& workloadFactory,
1272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001273{
1274 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1275 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001276 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001277}
1278
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001279LayerTestResult<float,2> SimpleSoftmaxTest(
1280 armnn::IWorkloadFactory& workloadFactory,
1281 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1282 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001283{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001284 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001285}
1286
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001287LayerTestResult<float,3> Simple3dSoftmaxTest(
1288 armnn::IWorkloadFactory& workloadFactory,
1289 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1290 float beta)
1291{
1292 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1293}
1294
1295LayerTestResult<float,4> Simple4dSoftmaxTest(
1296 armnn::IWorkloadFactory& workloadFactory,
1297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1298 float beta)
1299{
1300 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1301}
1302
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001303LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1304 armnn::IWorkloadFactory& workloadFactory,
1305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1306 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001307{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001308 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001309}
1310
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001311LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1312 armnn::IWorkloadFactory& workloadFactory,
1313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1314 float beta)
1315{
1316 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1317}
1318
1319LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1320 armnn::IWorkloadFactory& workloadFactory,
1321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1322 float beta)
1323{
1324 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1325}
1326
nikraj01248683f2019-05-29 16:46:50 +01001327LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1328 armnn::IWorkloadFactory& workloadFactory,
1329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1330 float beta)
1331{
1332 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1333}
1334
1335LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1336 armnn::IWorkloadFactory& workloadFactory,
1337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1338 float beta)
1339{
1340 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1341}
1342
1343LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1344 armnn::IWorkloadFactory& workloadFactory,
1345 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1346 float beta)
1347{
1348 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1349}
1350
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001351LayerTestResult<float,4> CompareNormalizationTest(
1352 armnn::IWorkloadFactory& workloadFactory,
1353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1354 armnn::IWorkloadFactory& refWorkloadFactory,
1355 armnn::NormalizationAlgorithmChannel normChannel,
1356 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001357{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001358 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001359}
1360
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001361LayerTestResult<float,2> CompareSoftmaxTest(
1362 armnn::IWorkloadFactory& workloadFactory,
1363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001364 armnn::IWorkloadFactory& refWorkloadFactory,
1365 float beta)
1366{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001367 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1368 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001369}
1370
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001371LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1372 armnn::IWorkloadFactory& workloadFactory,
1373 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001374 armnn::IWorkloadFactory& refWorkloadFactory,
1375 float beta)
1376{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001377 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1378 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001379}
1380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001381std::vector<LayerTestResult<float,3>> SplitterTest(
1382 armnn::IWorkloadFactory& workloadFactory,
1383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001384{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001385 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001386}
1387
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001388std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1389 armnn::IWorkloadFactory& workloadFactory,
1390 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001392 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001393}
1394
Ruomei Yan25339c32019-05-28 16:48:20 +01001395std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1396 armnn::IWorkloadFactory& workloadFactory,
1397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1398{
1399 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1400}
1401
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001402LayerTestResult<float, 3> CopyViaSplitterTest(
1403 armnn::IWorkloadFactory& workloadFactory,
1404 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001405{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001406 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001407}
1408
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001409LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1410 armnn::IWorkloadFactory& workloadFactory,
1411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001412{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001413 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001414}
1415
Ruomei Yan25339c32019-05-28 16:48:20 +01001416LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1417 armnn::IWorkloadFactory& workloadFactory,
1418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1419{
1420 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1421}
1422
telsoa01c577f2c2018-08-31 09:22:23 +01001423LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001424 armnn::IWorkloadFactory& workloadFactory,
1425 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001426{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001427 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001428 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1429 { 2., 3., 3., 4. }));
1430
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001431 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001432 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1433 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1434 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001435 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001436 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001437}
1438
1439LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01001440 armnn::IWorkloadFactory& workloadFactory,
1441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001442{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001443 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001444 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1445 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1446 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
1447
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001448 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001449 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1450 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1451 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1452 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1453 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1454 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1455 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
1456 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001457 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
1458 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001459}
1460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001461LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1462 armnn::IWorkloadFactory& workloadFactory,
1463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001464{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001465 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001466 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1467 {2., 3., 3., 4.}));
1468
1469
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001470 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001471 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1472 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1473 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1474
Conor Kennedyb9971c92019-05-07 07:14:23 +01001475 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001476 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001477}
1478
Conor Kennedyb9971c92019-05-07 07:14:23 +01001479LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1480 armnn::IWorkloadFactory& workloadFactory,
1481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1482{
1483 const float qScale = 1.0f;
1484 const int32_t qOffset = 0;
1485
1486 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1487 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1488
1489 armnn::TensorInfo inputDesc({2, 2}, datatype);
1490 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1491 std::vector<float>{2., 3., 3., 4.}));
1492
1493 armnn::TensorInfo outputDesc({2, 4}, datatype);
1494 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1495 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1496 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1497
1498 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1499 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1500
1501}
1502
1503LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1504 armnn::IWorkloadFactory& workloadFactory,
1505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1506{
1507 const float qScale = 1.0f;
1508 const int32_t qOffset = 0;
1509
1510 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1511 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1512
1513 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
1514 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1515 std::vector<float>({ 2., 3., 3., 4. })));
1516
1517 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
1518 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1519 qOffset, std::vector<float>(
1520 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1521 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
1522
1523 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
1524 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1525}
1526
1527LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
1528 armnn::IWorkloadFactory& workloadFactory,
1529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1530{
1531 const float qScale = 2.0f;
1532 const int32_t qOffset = 0;
1533
1534 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1535 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1536
1537 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
1538 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1539 qOffset, std::vector<float>(
1540 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1541 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
1542
1543 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
1544 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1545 qOffset, std::vector<float>(
1546 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1547 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1548 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1549 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1550 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1551 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
1552
1553 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
1554 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1555}
1556
1557LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1558 armnn::IWorkloadFactory& workloadFactory,
1559 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1560{
1561 const float qScale = 1.0f;
1562 const int32_t qOffset = 0;
1563
1564 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1565
1566 armnn::TensorInfo inputDesc({2, 2}, datatype);
1567 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1568 qOffset, std::vector<float>{2., 3., 3., 4.}));
1569
1570 armnn::TensorInfo outputDesc({2, 4}, datatype);
1571 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1572 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1573 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1574
1575 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1576 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1577}
1578
Jim Flynn4ed6c832019-05-20 11:02:46 +01001579LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001580 armnn::IWorkloadFactory& workloadFactory,
1581 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001582{
surmeh013537c2c2018-05-18 16:31:43 +01001583 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001584 unsigned int outputHeight = 6;
1585 unsigned int outputChannels = 3;
1586
surmeh013537c2c2018-05-18 16:31:43 +01001587 unsigned int inputWidth1 = 3;
1588 unsigned int inputHeight1 = 6;
1589 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001590
surmeh013537c2c2018-05-18 16:31:43 +01001591 unsigned int inputWidth2 = 3;
1592 unsigned int inputHeight2 = 6;
1593 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001594
telsoa01c577f2c2018-08-31 09:22:23 +01001595 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001596 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1597 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1598 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001599
1600 LayerTestResult<float,3> ret(outputTensorInfo);
1601
telsoa014fcda012018-03-09 14:13:49 +00001602 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001603 {
1604 1.0f, 2.0f, 3.0f,
1605 4.0f, 5.0f, 6.0f,
1606 7.0f, 8.0f, 9.0f,
1607 10.0f, 11.0f, 12.0f,
1608 13.0f, 14.0f, 15.0f,
1609 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001610
surmeh013537c2c2018-05-18 16:31:43 +01001611 19.0f, 20.0f, 21.0f,
1612 22.0f, 23.0f, 24.0f,
1613 25.0f, 26.0f, 27.0f,
1614 28.0f, 29.0f, 30.0f,
1615 31.0f, 32.0f, 33.0f,
1616 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001617
surmeh013537c2c2018-05-18 16:31:43 +01001618 37.0f, 38.0f, 39.0f,
1619 40.0f, 41.0f, 42.0f,
1620 43.0f, 44.0f, 45.0f,
1621 46.0f, 47.0f, 48.0f,
1622 49.0f, 50.0f, 51.0f,
1623 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001624 })
1625 );
1626
telsoa014fcda012018-03-09 14:13:49 +00001627 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1628 {
surmeh013537c2c2018-05-18 16:31:43 +01001629 1.0f, 2.0f, 3.0f,
1630 4.0f, 5.0f, 6.0f,
1631 7.0f, 8.0f, 9.0f,
1632 10.0f, 11.0f, 12.0f,
1633 13.0f, 14.0f, 15.0f,
1634 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001635
surmeh013537c2c2018-05-18 16:31:43 +01001636 19.0f, 20.0f, 21.0f,
1637 22.0f, 23.0f, 24.0f,
1638 25.0f, 26.0f, 27.0f,
1639 28.0f, 29.0f, 30.0f,
1640 31.0f, 32.0f, 33.0f,
1641 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001642 })
1643 );
1644
1645 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1646 {
surmeh013537c2c2018-05-18 16:31:43 +01001647 37.0f, 38.0f, 39.0f,
1648 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001649 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001650 46.0f, 47.0f, 48.0f,
1651 49.0f, 50.0f, 51.0f,
1652 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001653 })
1654 );
1655
telsoa01c577f2c2018-08-31 09:22:23 +01001656 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01001657 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00001658
telsoa01c577f2c2018-08-31 09:22:23 +01001659 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01001660 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00001661
telsoa014fcda012018-03-09 14:13:49 +00001662 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1663
1664 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1665
1666 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1667 subTensorsSupported ?
1668 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1669 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1670
1671 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1672 subTensorsSupported ?
1673 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1674 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1675
Jim Flynne242f2d2019-05-22 14:24:13 +01001676 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00001677 armnn::WorkloadInfo info;
1678 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1679 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001680 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1681
1682 data.m_ViewOrigins.push_back(window1);
1683 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001684
Jim Flynn4ed6c832019-05-20 11:02:46 +01001685 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00001686
1687 inputHandle1->Allocate();
1688 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001689 outputHandle->Allocate();
1690
1691 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1692 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001693
Derek Lambertif30f7d32019-04-09 10:25:02 +01001694 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001695 workload->Execute();
1696
1697 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1698
1699 return ret;
1700}
1701
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001702LayerTestResult<float,4> AdditionTest(
1703 armnn::IWorkloadFactory& workloadFactory,
1704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001705{
1706 unsigned int batchSize = 2;
1707 unsigned int channels = 2;
1708 unsigned int height = 2;
1709 unsigned int width = 3;
1710
1711 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1712 armnn::TensorInfo outputTensorInfo;
1713
1714 unsigned int shape[] = {batchSize, channels, height, width};
1715
1716 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1717 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1718 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1719
1720
1721 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1722 {
1723 0.0f, 2.0f, 1.0f,
1724 0.2f, 1.0f, 2.0f,
1725
1726 1.0f, 2.0f, 1.0f,
1727 0.2f, 1.0f, 2.0f,
1728
1729 0.0f, 2.0f, 1.0f,
1730 4.2f, 1.0f, 2.0f,
1731
1732 0.0f, 0.0f, 1.0f,
1733 0.2f, 1.0f, 2.0f,
1734 }));
1735
1736 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1737 {
1738 1.0f, 2.0f, 1.0f,
1739 0.0f, 1.0f, 2.0f,
1740
1741 1.0f, 2.0f, -2.0f,
1742 0.2f, 1.0f, 2.0f,
1743
1744 0.0f, 2.0f, 1.0f,
1745 4.2f, 0.0f, -3.0f,
1746
1747 0.0f, 0.0f, 1.0f,
1748 0.7f, 1.0f, 5.0f,
1749 }));
1750
1751 LayerTestResult<float,4> ret(outputTensorInfo);
1752 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1753 {
1754 1.0f, 4.0f, 2.0f,
1755 0.2f, 2.0f, 4.0f,
1756
1757 2.0f, 4.0f, -1.0f,
1758 0.4f, 2.0f, 4.0f,
1759
1760 0.0f, 4.0f, 2.0f,
1761 8.4f, 1.0f, -1.0f,
1762
1763 0.0f, 0.0f, 2.0f,
1764 0.9f, 2.0f, 7.0f,
1765 }));
1766
1767 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1768 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1769 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1770
1771 armnn::AdditionQueueDescriptor data;
1772 armnn::WorkloadInfo info;
1773 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1774 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1775 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1776
1777 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1778
1779 inputHandle1->Allocate();
1780 inputHandle2->Allocate();
1781 outputHandle->Allocate();
1782
1783 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1784 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1785
Derek Lambertif30f7d32019-04-09 10:25:02 +01001786 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001787 workload->Execute();
1788
1789 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1790
1791 return ret;
1792}
1793
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001794template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001795LayerTestResult<T, 4> AdditionBroadcastTestImpl(
1796 armnn::IWorkloadFactory& workloadFactory,
1797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001798 float qScale,
1799 int32_t qOffset)
1800{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001801 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
1802 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
1803 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001804
1805 if (armnn::IsQuantizedType<T>())
1806 {
1807 inputTensorInfo1.SetQuantizationScale(qScale);
1808 inputTensorInfo1.SetQuantizationOffset(qOffset);
1809 inputTensorInfo2.SetQuantizationScale(qScale);
1810 inputTensorInfo2.SetQuantizationOffset(qOffset);
1811 outputTensorInfo.SetQuantizationScale(qScale);
1812 outputTensorInfo.SetQuantizationOffset(qOffset);
1813 }
1814
1815 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1816 {
1817 0.0f,
1818 1.0f,
1819
1820 2.0f,
1821 3.0f,
1822
1823 4.0f,
1824 5.0f,
1825 }));
1826
1827 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1828 {
1829 0.5f, 1.5f, 2.5f,
1830 3.5f, 4.5f, 5.5f,
1831 }));
1832
1833 LayerTestResult<T,4> ret(outputTensorInfo);
1834 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1835 {
1836 0.5f, 1.5f, 2.5f,
1837 4.5f, 5.5f, 6.5f,
1838
1839 2.5f, 3.5f, 4.5f,
1840 6.5f, 7.5f, 8.5f,
1841
1842 4.5f, 5.5f, 6.5f,
1843 8.5f, 9.5f, 10.5f,
1844 }));
1845
1846 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1847 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1848 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1849
1850 armnn::AdditionQueueDescriptor data;
1851 armnn::WorkloadInfo info;
1852 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1853 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1854 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1855
1856 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1857
1858 inputHandle1->Allocate();
1859 inputHandle2->Allocate();
1860 outputHandle->Allocate();
1861
1862 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1863 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1864
Derek Lambertif30f7d32019-04-09 10:25:02 +01001865 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001866 workload->Execute();
1867
1868 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1869
1870 return ret;
1871}
1872
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001873template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001874LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
1875 armnn::IWorkloadFactory& workloadFactory,
1876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001877 float qScale,
1878 int32_t qOffset)
1879{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001880 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
1881 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
1882 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001883
1884 if (armnn::IsQuantizedType<T>())
1885 {
1886 inputTensorInfo1.SetQuantizationScale(qScale);
1887 inputTensorInfo1.SetQuantizationOffset(qOffset);
1888 inputTensorInfo2.SetQuantizationScale(qScale);
1889 inputTensorInfo2.SetQuantizationOffset(qOffset);
1890 outputTensorInfo.SetQuantizationScale(qScale);
1891 outputTensorInfo.SetQuantizationOffset(qOffset);
1892 }
1893
1894 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1895 {
1896 0.0f, 1.0f, 2.0f,
1897 3.0f, 4.0f, 5.0f,
1898 6.0f, 7.0f, 8.0f,
1899 9.0f, 10.0f, 11.0f,
1900 12.0f, 13.0f, 14.0f,
1901 15.0f, 16.0f, 17.0f,
1902 }));
1903
1904 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1905 {
1906 0.5f,
1907 }));
1908
1909 LayerTestResult<T,4> ret(outputTensorInfo);
1910 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1911 {
1912 0.5f, 1.5f, 2.5f,
1913 3.5f, 4.5f, 5.5f,
1914 6.5f, 7.5f, 8.5f,
1915 9.5f, 10.5f, 11.5f,
1916 12.5f, 13.5f, 14.5f,
1917 15.5f, 16.5f, 17.5f,
1918 }));
1919
1920 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1921 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1922 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1923
1924 armnn::AdditionQueueDescriptor data;
1925 armnn::WorkloadInfo info;
1926 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1927 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1928 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1929
1930 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1931
1932 inputHandle1->Allocate();
1933 inputHandle2->Allocate();
1934 outputHandle->Allocate();
1935
1936 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1937 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1938
Derek Lambertif30f7d32019-04-09 10:25:02 +01001939 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001940 workload->Execute();
1941
1942 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1943
1944 return ret;
1945}
1946
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001947LayerTestResult<float, 4> AdditionBroadcastTest(
1948 armnn::IWorkloadFactory& workloadFactory,
1949 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001950{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001951 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1952 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001953}
1954
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001955LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1956 armnn::IWorkloadFactory& workloadFactory,
1957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001958{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001959 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1960 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001961}
1962
Sadik Armagan2999a022019-04-09 14:20:12 +01001963LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1964 armnn::IWorkloadFactory& workloadFactory,
1965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1966{
1967 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1968 workloadFactory, memoryManager, 2.f, 0);
1969}
1970
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001971LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1972 armnn::IWorkloadFactory& workloadFactory,
1973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001975 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1976 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001977}
1978
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001979LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001982{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001983 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1984 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001985}
1986
Sadik Armagan2999a022019-04-09 14:20:12 +01001987LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1988 armnn::IWorkloadFactory& workloadFactory,
1989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1990{
1991 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1992 workloadFactory, memoryManager, 0.1333333f, 0);
1993}
1994
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001995LayerTestResult<float,4> CompareAdditionTest(
1996 armnn::IWorkloadFactory& workloadFactory,
1997 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1998 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001999{
2000 unsigned int batchSize = 4;
2001 unsigned int channels = 1;
2002 unsigned int height = 2;
2003 unsigned int width = 3;
2004
2005 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2006 armnn::TensorInfo outputTensorInfo;
2007
2008 unsigned int shape[] = {batchSize, channels, height, width};
2009
2010 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2011 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2012 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2013
2014 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2015 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2016
2017 LayerTestResult<float,4> ret(outputTensorInfo);
2018
2019 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2020 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2021 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2022
2023 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2024 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2025 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2026
2027 armnn::AdditionQueueDescriptor data;
2028 armnn::WorkloadInfo info;
2029 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2030 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2031 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2032
2033 armnn::AdditionQueueDescriptor refData = data;
2034 armnn::WorkloadInfo refInfo = info;
2035 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2036 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2037 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2038
2039 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2040 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2041
2042 inputHandle1->Allocate();
2043 inputHandle2->Allocate();
2044 outputHandle->Allocate();
2045 inputHandle1Ref->Allocate();
2046 inputHandle2Ref->Allocate();
2047 outputHandleRef->Allocate();
2048
2049 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2050 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2051 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2052 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2053
Derek Lambertif30f7d32019-04-09 10:25:02 +01002054 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002055 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002056 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002057 workloadRef->Execute();
2058
2059 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2060 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2061
2062 return ret;
2063}
2064
surmeh01bceff2f2018-03-29 16:29:27 +01002065namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002066template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002067LayerTestResult<T, 4> DivisionTestHelper(
2068 armnn::IWorkloadFactory& workloadFactory,
2069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2070 const unsigned int shape0[4],
2071 const std::vector<T>& values0,
2072 float scale0,
2073 int32_t offset0,
2074 const unsigned int shape1[4],
2075 const std::vector<T> & values1,
2076 float scale1,
2077 int32_t offset1,
2078 const unsigned int outShape[4],
2079 const std::vector<T> & outValues,
2080 float outScale,
2081 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002082{
Sadik Armagan2999a022019-04-09 14:20:12 +01002083 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2084 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2085 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002086
David Beck5cd01f32018-09-12 16:00:08 +01002087 inputTensorInfo0.SetQuantizationScale(scale0);
2088 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002089
David Beck5cd01f32018-09-12 16:00:08 +01002090 inputTensorInfo1.SetQuantizationScale(scale1);
2091 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002092
David Beck5cd01f32018-09-12 16:00:08 +01002093 outputTensorInfo.SetQuantizationScale(outScale);
2094 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002095
David Beck5cd01f32018-09-12 16:00:08 +01002096 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2097 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002098
David Beck5cd01f32018-09-12 16:00:08 +01002099 LayerTestResult<T, 4> result(outputTensorInfo);
2100 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002101
David Beck5cd01f32018-09-12 16:00:08 +01002102 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2103 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2104 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002105
David Beck5cd01f32018-09-12 16:00:08 +01002106 armnn::DivisionQueueDescriptor data;
2107 armnn::WorkloadInfo info;
2108 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2109 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2110 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002111
David Beck5cd01f32018-09-12 16:00:08 +01002112 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002113
David Beck5cd01f32018-09-12 16:00:08 +01002114 inputHandle0->Allocate();
2115 inputHandle1->Allocate();
2116 outputHandle->Allocate();
2117
2118 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2119 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2120
Derek Lambertif30f7d32019-04-09 10:25:02 +01002121 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002122 workload->Execute();
2123
2124 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2125
2126 return result;
2127}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002128} // anonymous namespace
2129
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002130LayerTestResult<float,4> DivisionByZeroTest(
2131 armnn::IWorkloadFactory& workloadFactory,
2132 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002133{
2134 const unsigned int width = 2;
2135 const unsigned int height = 2;
2136 const unsigned int channelCount = 2;
2137 const unsigned int batchSize = 2;
2138
2139 unsigned int shape[] = { batchSize, channelCount, height, width };
2140
2141 std::vector<float> input0({
2142 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2143 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2144
2145 std::vector<float> input1({
2146 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2147 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2148
2149 std::vector<float> output({
2150 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2151 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2152
Sadik Armagan2999a022019-04-09 14:20:12 +01002153 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2154 memoryManager,
2155 shape, input0, 1.0f, 0,
2156 shape, input1, 1.0f, 0,
2157 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002158}
2159
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002160LayerTestResult<float,4> DivisionTest(
2161 armnn::IWorkloadFactory& workloadFactory,
2162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002163{
2164 const unsigned int width = 2;
2165 const unsigned int height = 2;
2166 const unsigned int channelCount = 2;
2167 const unsigned int batchSize = 2;
2168
2169 unsigned int shape[] = { batchSize, channelCount, height, width };
2170
2171 std::vector<float> input0({
2172 2, 2, 2, 2, 3, 3, 3, 3,
2173 4, 4, 4, 4, 5, 5, 5, 5 });
2174
2175 std::vector<float> input1({
2176 1, 1, 1, 1, 2, 2, 2, 2,
2177 4, 4, 4, 4, 4, 4, 4, 4 });
2178
2179 std::vector<float> output({
2180 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2181 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2182
David Beck5cd01f32018-09-12 16:00:08 +01002183
Sadik Armagan2999a022019-04-09 14:20:12 +01002184 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2185 memoryManager,
2186 shape, input0, 1.0f, 0,
2187 shape, input1, 1.0f, 0,
2188 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002189}
2190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002191LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2192 armnn::IWorkloadFactory& workloadFactory,
2193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002194{
2195 unsigned int shape0[] = { 1, 2, 2, 2 };
2196 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2197
2198 unsigned int shape1[] = { 1, 1, 1, 1 };
2199 std::vector<float> input1({ 2 });
2200
2201 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2202
David Beck5cd01f32018-09-12 16:00:08 +01002203
Sadik Armagan2999a022019-04-09 14:20:12 +01002204 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2205 memoryManager,
2206 shape0, input0, 1.0f, 0,
2207 shape1, input1, 1.0f, 0,
2208 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002209}
2210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002211LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2212 armnn::IWorkloadFactory& workloadFactory,
2213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002214{
2215 unsigned int shape0[] = { 1, 3, 3, 2 };
2216 std::vector<float> input0({
2217 1, 4, 3, 8, 5, 12,
2218 7, 16, 9, 20, 11, 24,
2219 13, 28, 15, 32, 17, 36});
2220
2221 unsigned int shape1[] = { 1, 1, 1, 2 };
2222 std::vector<float> input1({ 1, 2 });
2223
2224 std::vector<float> output({
2225 1, 2, 3, 4, 5, 6,
2226 7, 8, 9, 10, 11, 12,
2227 13, 14, 15, 16, 17, 18});
2228
Sadik Armagan2999a022019-04-09 14:20:12 +01002229 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2230 memoryManager,
2231 shape0, input0, 1.0f, 0,
2232 shape1, input1, 1.0f, 0,
2233 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002234}
2235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002236LayerTestResult<uint8_t,4> DivisionUint8Test(
2237 armnn::IWorkloadFactory& workloadFactory,
2238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002239{
2240 const unsigned int width = 2;
2241 const unsigned int height = 2;
2242 const unsigned int channelCount = 2;
2243 const unsigned int batchSize = 2;
2244
2245 unsigned int shape[] = { batchSize, channelCount, height, width };
2246
2247 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2248 4, 4, 4, 4, 5, 5, 5, 5 });
2249
2250 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2251 4, 4, 4, 4, 4, 4, 4, 4 });
2252
2253 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2254 4, 4, 4, 4, 5, 5, 5, 5});
2255
2256
Sadik Armagan2999a022019-04-09 14:20:12 +01002257 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2258 memoryManager,
2259 shape, input0, 1.0f, 0,
2260 shape, input1, 1.0f, 0,
2261 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002262}
2263
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002264LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2265 armnn::IWorkloadFactory& workloadFactory,
2266 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002267{
2268 unsigned int shape0[] = { 1, 2, 2, 2 };
2269 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2270
2271 unsigned int shape1[] = { 1, 1, 1, 1 };
2272 std::vector<uint8_t> input1({ 2 });
2273
2274 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2275
Sadik Armagan2999a022019-04-09 14:20:12 +01002276 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2277 memoryManager,
2278 shape0, input0, 1.0f, 0,
2279 shape1, input1, 1.0f, 0,
2280 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002281}
2282
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002283LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2284 armnn::IWorkloadFactory& workloadFactory,
2285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002286{
2287 unsigned int shape0[] = { 1, 3, 3, 2 };
2288 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2289 7, 16, 9, 20, 11, 24,
2290 13, 28, 15, 32, 17, 36});
2291
2292 unsigned int shape1[] = { 1, 1, 1, 2 };
2293 std::vector<uint8_t> input1({ 1, 2 });
2294
2295 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2296 7, 8, 9, 10, 11, 12,
2297 13, 14, 15, 16, 17, 18});
2298
Sadik Armagan2999a022019-04-09 14:20:12 +01002299 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2300 memoryManager,
2301 shape0, input0, 1.0f, 0,
2302 shape1, input1, 1.0f, 0,
2303 shape0, output, 1.0f, 0);
2304}
2305
2306LayerTestResult<int16_t,4> DivisionInt16Test(
2307 armnn::IWorkloadFactory& workloadFactory,
2308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2309{
2310 unsigned int shape[] = { 2, 2, 2, 2 };
2311
2312 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2313 4, 4, 4, 4, 5, 5, 5, 5 });
2314
2315 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2316 4, 4, 4, 4, 4, 4, 4, 4 });
2317
2318 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2319 4, 4, 4, 4, 5, 5, 5, 5});
2320
2321
2322 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2323 memoryManager,
2324 shape, input0, 1.0f, 0,
2325 shape, input1, 1.0f, 0,
2326 shape, output, 0.25f, 0);
2327}
2328
2329LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2330 armnn::IWorkloadFactory& workloadFactory,
2331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2332{
2333 unsigned int shape0[] = { 1, 2, 2, 2 };
2334 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2335
2336 unsigned int shape1[] = { 1, 1, 1, 1 };
2337 std::vector<int16_t> input1({ 2 });
2338
2339 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2340
2341 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2342 memoryManager,
2343 shape0, input0, 1.0f, 0,
2344 shape1, input1, 1.0f, 0,
2345 shape0, output, 1.0f, 0);
2346}
2347
2348LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2349 armnn::IWorkloadFactory& workloadFactory,
2350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2351{
2352 unsigned int shape0[] = { 1, 3, 3, 2 };
2353 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2354 7, 16, 9, 20, 11, 24,
2355 13, 28, 15, 32, 17, 36});
2356
2357 unsigned int shape1[] = { 1, 1, 1, 2 };
2358 std::vector<int16_t> input1({ 1, 2 });
2359
2360 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2361 7, 8, 9, 10, 11, 12,
2362 13, 14, 15, 16, 17, 18});
2363
2364 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2365 memoryManager,
2366 shape0, input0, 1.0f, 0,
2367 shape1, input1, 1.0f, 0,
2368 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002369}
2370
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002371template<typename DescriptorType>
2372std::unique_ptr<armnn::IWorkload> CreateWorkload(
2373 const armnn::IWorkloadFactory& workloadFactory,
2374 const armnn::WorkloadInfo& info,
2375 const DescriptorType& descriptor)
2376{
2377 return CreateWorkload(workloadFactory, info, descriptor);
2378};
2379
2380template<>
2381std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2382 const armnn::IWorkloadFactory& workloadFactory,
2383 const armnn::WorkloadInfo& info,
2384 const armnn::MaximumQueueDescriptor& descriptor)
2385{
2386 return workloadFactory.CreateMaximum(descriptor, info);
2387}
2388
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002389template<>
2390std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2391 const armnn::IWorkloadFactory& workloadFactory,
2392 const armnn::WorkloadInfo& info,
2393 const armnn::MinimumQueueDescriptor& descriptor)
2394{
2395 return workloadFactory.CreateMinimum(descriptor, info);
2396}
2397
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002398template<>
2399std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2400 const armnn::IWorkloadFactory& workloadFactory,
2401 const armnn::WorkloadInfo& info,
2402 const armnn::EqualQueueDescriptor& descriptor)
2403{
2404 return workloadFactory.CreateEqual(descriptor, info);
2405}
2406
FrancisMurtagh878f0232018-12-19 10:56:15 +00002407template<>
2408std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2409 const armnn::IWorkloadFactory& workloadFactory,
2410 const armnn::WorkloadInfo& info,
2411 const armnn::GreaterQueueDescriptor& descriptor)
2412{
2413 return workloadFactory.CreateGreater(descriptor, info);
2414}
2415
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002416namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002417
2418template <typename Descriptor,
2419 armnn::DataType ArmnnTypeInput,
2420 armnn::DataType ArmnnTypeOutput,
2421 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2422 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2423LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2424 armnn::IWorkloadFactory & workloadFactory,
2425 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2426 const unsigned int shape0[4], std::vector<TInput> values0,
2427 const unsigned int shape1[4], std::vector<TInput> values1,
2428 const unsigned int outShape[4], std::vector<TOutput> outValues,
2429 float qScale = 0.0f, int qOffset = 0)
2430{
2431 const size_t dimensionCount = 4;
2432 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2433 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2434 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2435
2436 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2437 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2438
2439 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002440 {
kevmay012b4d88e2019-01-24 14:05:09 +00002441 inputTensorInfo0.SetQuantizationScale(qScale);
2442 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002443
kevmay012b4d88e2019-01-24 14:05:09 +00002444 inputTensorInfo1.SetQuantizationScale(qScale);
2445 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002446
kevmay012b4d88e2019-01-24 14:05:09 +00002447 outputTensorInfo.SetQuantizationScale(qScale);
2448 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002449 }
kevmay012b4d88e2019-01-24 14:05:09 +00002450
2451 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2452
2453 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2454 {
2455 ret.compareBoolean = true;
2456 }
2457
2458 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2459 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2460 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2461
2462 Descriptor data;
2463 armnn::WorkloadInfo info;
2464 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2465 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2466 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2467 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2468
2469 inputHandle0->Allocate();
2470 inputHandle1->Allocate();
2471 outputHandle->Allocate();
2472
2473 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2474 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2475
Derek Lambertif30f7d32019-04-09 10:25:02 +01002476 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002477 ExecuteWorkload(*workload, memoryManager);
2478
2479 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2480
2481 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2482 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002483}
2484
kevmay012b4d88e2019-01-24 14:05:09 +00002485template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2486LayerTestResult<T, 4> ElementwiseTestHelper(
2487 armnn::IWorkloadFactory & workloadFactory,
2488 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2489 const unsigned int shape0[4], std::vector<T> values0,
2490 const unsigned int shape1[4], std::vector<T> values1,
2491 const unsigned int outShape[4], std::vector<T> outValues,
2492 float qScale = 0.0f, int qOffset = 0)
2493{
2494 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2495 (workloadFactory,
2496 memoryManager,
2497 shape0,
2498 values0,
2499 shape1,
2500 values1,
2501 outShape,
2502 outValues,
2503 qScale,
2504 qOffset);
2505}
2506}
2507
2508LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002510{
2511 const unsigned int width = 2;
2512 const unsigned int height = 2;
2513 const unsigned int channelCount = 2;
2514 const unsigned int batchSize = 2;
2515
2516 unsigned int shape[] = { batchSize, channelCount, height, width };
2517
2518 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2519 3, 3, 3, 3, 4, 4, 4, 4 });
2520
2521 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2522 5, 5, 5, 5, 4, 4, 4, 4 });
2523
kevmay012b4d88e2019-01-24 14:05:09 +00002524 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2525 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002526
kevmay012b4d88e2019-01-24 14:05:09 +00002527 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002528 workloadFactory,
2529 memoryManager,
2530 shape,
2531 input0,
2532 shape,
2533 input1,
2534 shape,
2535 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002536}
2537
kevmay012b4d88e2019-01-24 14:05:09 +00002538LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002539 armnn::IWorkloadFactory& workloadFactory,
2540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2541{
2542 unsigned int shape0[] = { 1, 2, 2, 2 };
2543 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2544
2545 unsigned int shape1[] = { 1, 1, 1, 1 };
2546 std::vector<float> input1({ 1 });
2547
kevmay012b4d88e2019-01-24 14:05:09 +00002548 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002549
kevmay012b4d88e2019-01-24 14:05:09 +00002550 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002551 workloadFactory,
2552 memoryManager,
2553 shape0,
2554 input0,
2555 shape1,
2556 input1,
2557 shape0,
2558 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002559}
2560
kevmay012b4d88e2019-01-24 14:05:09 +00002561LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002562 armnn::IWorkloadFactory& workloadFactory,
2563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2564{
2565 const unsigned int shape0[] = { 1, 2, 2, 3 };
2566 const unsigned int shape1[] = { 1, 1, 1, 3 };
2567
2568 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2569 7, 8, 9, 10, 11, 12 });
2570
2571 std::vector<float> input1({ 1, 2, 3});
2572
kevmay012b4d88e2019-01-24 14:05:09 +00002573 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2574 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002575
kevmay012b4d88e2019-01-24 14:05:09 +00002576 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002577 workloadFactory,
2578 memoryManager,
2579 shape0,
2580 input0,
2581 shape1,
2582 input1,
2583 shape0,
2584 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002585}
2586
2587LayerTestResult<uint8_t, 4> EqualUint8Test(
2588 armnn::IWorkloadFactory& workloadFactory,
2589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2590{
2591 unsigned int shape[] = { 2, 2, 2, 2 };
2592
2593 // See dequantized values to the right.
2594 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002595 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002596
2597 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2598 3, 3, 3, 3, 5, 5, 5, 5 });
2599
2600 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2601 1, 1, 1, 1, 0, 0, 0, 0 });
2602
kevmay012b4d88e2019-01-24 14:05:09 +00002603 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2604 armnn::DataType::QuantisedAsymm8,
2605 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002606 workloadFactory,
2607 memoryManager,
2608 shape,
2609 input0,
2610 shape,
2611 input1,
2612 shape,
2613 output,
2614 1.0f,
2615 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002616}
2617
2618LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2619 armnn::IWorkloadFactory& workloadFactory,
2620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2621{
2622 const unsigned int shape0[] = { 1, 2, 2, 3 };
2623 const unsigned int shape1[] = { 1, 1, 1, 1 };
2624
2625 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2626 7, 8, 9, 10, 11, 12 });
2627
2628 std::vector<uint8_t> input1({ 1 });
2629
2630 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2631 0, 0, 0, 0, 0, 0 });
2632
kevmay012b4d88e2019-01-24 14:05:09 +00002633 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2634 armnn::DataType::QuantisedAsymm8,
2635 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002636 workloadFactory,
2637 memoryManager,
2638 shape0,
2639 input0,
2640 shape1,
2641 input1,
2642 shape0,
2643 output,
2644 1.0f,
2645 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002646}
2647
2648LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2649 armnn::IWorkloadFactory& workloadFactory,
2650 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2651{
2652 const unsigned int shape0[] = { 1, 2, 2, 3 };
2653 const unsigned int shape1[] = { 1, 1, 1, 3 };
2654
2655 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2656 7, 8, 9, 10, 11, 12 });
2657
2658 std::vector<uint8_t> input1({ 1, 1, 3});
2659
2660 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2661 0, 0, 0, 0, 0, 0 });
2662
kevmay012b4d88e2019-01-24 14:05:09 +00002663 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2664 armnn::DataType::QuantisedAsymm8,
2665 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002666 workloadFactory,
2667 memoryManager,
2668 shape0,
2669 input0,
2670 shape1,
2671 input1,
2672 shape0,
2673 output,
2674 1.0f,
2675 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002676}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002677
kevmay012b4d88e2019-01-24 14:05:09 +00002678LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2680{
2681 const unsigned int width = 2;
2682 const unsigned int height = 2;
2683 const unsigned int channelCount = 2;
2684 const unsigned int batchSize = 2;
2685
2686 unsigned int shape[] = { batchSize, channelCount, height, width };
2687
2688 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2689 3, 3, 3, 3, 4, 4, 4, 4 });
2690
2691 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2692 5, 5, 5, 5, 4, 4, 4, 4 });
2693
kevmay012b4d88e2019-01-24 14:05:09 +00002694 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2695 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002696
kevmay012b4d88e2019-01-24 14:05:09 +00002697 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002698 workloadFactory,
2699 memoryManager,
2700 shape,
2701 input0,
2702 shape,
2703 input1,
2704 shape,
2705 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002706}
2707
kevmay012b4d88e2019-01-24 14:05:09 +00002708LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002709 armnn::IWorkloadFactory& workloadFactory,
2710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2711{
2712 unsigned int shape0[] = { 1, 2, 2, 2 };
2713 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2714
2715 unsigned int shape1[] = { 1, 1, 1, 1 };
2716 std::vector<float> input1({ 1 });
2717
kevmay012b4d88e2019-01-24 14:05:09 +00002718 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002719
kevmay012b4d88e2019-01-24 14:05:09 +00002720 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002721 workloadFactory,
2722 memoryManager,
2723 shape0,
2724 input0,
2725 shape1,
2726 input1,
2727 shape0,
2728 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002729}
2730
kevmay012b4d88e2019-01-24 14:05:09 +00002731LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002732 armnn::IWorkloadFactory& workloadFactory,
2733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2734{
2735 const unsigned int shape0[] = { 1, 2, 2, 3 };
2736 const unsigned int shape1[] = { 1, 1, 1, 3 };
2737
2738 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2739 7, 8, 9, 10, 11, 12 });
2740
2741 std::vector<float> input1({ 1, 3, 2});
2742
kevmay012b4d88e2019-01-24 14:05:09 +00002743 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2744 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002745
kevmay012b4d88e2019-01-24 14:05:09 +00002746 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002747 workloadFactory,
2748 memoryManager,
2749 shape0,
2750 input0,
2751 shape1,
2752 input1,
2753 shape0,
2754 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002755}
2756
2757LayerTestResult<uint8_t, 4> GreaterUint8Test(
2758 armnn::IWorkloadFactory& workloadFactory,
2759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2760{
2761 unsigned int shape[] = { 2, 2, 2, 2 };
2762
2763 // See dequantized values to the right.
2764 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2765 3, 3, 3, 3, 5, 5, 5, 5 });
2766
2767 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2768 2, 2, 2, 2, 5, 5, 5, 5 });
2769
2770 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2771 1, 1, 1, 1, 0, 0, 0, 0 });
2772
kevmay012b4d88e2019-01-24 14:05:09 +00002773 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2774 armnn::DataType::QuantisedAsymm8,
2775 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002776 workloadFactory,
2777 memoryManager,
2778 shape,
2779 input0,
2780 shape,
2781 input1,
2782 shape,
2783 output,
2784 1.0f,
2785 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002786}
2787
2788LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2789 armnn::IWorkloadFactory& workloadFactory,
2790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2791{
2792 const unsigned int shape0[] = { 1, 2, 2, 3 };
2793 const unsigned int shape1[] = { 1, 1, 1, 1 };
2794
2795 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2796 7, 8, 9, 10, 11, 12 });
2797
2798 std::vector<uint8_t> input1({ 1 });
2799
2800 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2801 1, 1, 1, 1, 1, 1 });
2802
kevmay012b4d88e2019-01-24 14:05:09 +00002803 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2804 armnn::DataType::QuantisedAsymm8,
2805 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002806 workloadFactory,
2807 memoryManager,
2808 shape0,
2809 input0,
2810 shape1,
2811 input1,
2812 shape0,
2813 output,
2814 1.0f,
2815 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002816}
2817
2818LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2819 armnn::IWorkloadFactory& workloadFactory,
2820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2821{
2822 const unsigned int shape0[] = { 1, 2, 2, 3 };
2823 const unsigned int shape1[] = { 1, 1, 1, 3 };
2824
2825 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2826 7, 8, 9, 10, 11, 12 });
2827
2828 std::vector<uint8_t> input1({ 1, 1, 3});
2829
2830 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2831 1, 1, 1, 1, 1, 1 });
2832
kevmay012b4d88e2019-01-24 14:05:09 +00002833 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2834 armnn::DataType::QuantisedAsymm8,
2835 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002836 workloadFactory,
2837 memoryManager,
2838 shape0,
2839 input0,
2840 shape1,
2841 input1,
2842 shape0,
2843 output,
2844 1.0f,
2845 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002846}
2847
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002848LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2849 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2850{
2851 const unsigned int width = 2;
2852 const unsigned int height = 2;
2853 const unsigned int channelCount = 2;
2854 const unsigned int batchSize = 2;
2855
2856 unsigned int shape[] = { batchSize, channelCount, height, width };
2857
2858 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2859 3, 3, 3, 3, 4, 4, 4, 4 });
2860
2861 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2862 4, 4, 4, 4, 5, 5, 5, 5 });
2863
2864 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2865 4, 4, 4, 4, 5, 5, 5, 5 });
2866
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002867 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2868 workloadFactory,
2869 memoryManager,
2870 shape,
2871 input0,
2872 shape,
2873 input1,
2874 shape,
2875 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002876}
2877
2878LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2879 armnn::IWorkloadFactory& workloadFactory,
2880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2881{
2882 unsigned int shape0[] = { 1, 2, 2, 2 };
2883 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2884
2885 unsigned int shape1[] = { 1, 1, 1, 1 };
2886 std::vector<float> input1({ 2 });
2887
2888 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2889
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002890 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2891 workloadFactory,
2892 memoryManager,
2893 shape0,
2894 input0,
2895 shape1,
2896 input1,
2897 shape0,
2898 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002899}
2900
2901LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2902 armnn::IWorkloadFactory& workloadFactory,
2903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2904{
2905 const unsigned int shape0[] = { 1, 2, 2, 3 };
2906 const unsigned int shape1[] = { 1, 1, 1, 3 };
2907
2908 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2909 7, 8, 9, 10, 11, 12 });
2910
2911 std::vector<float> input1({ 1, 2, 3});
2912
2913 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002914 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002915
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002916 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2917 workloadFactory,
2918 memoryManager,
2919 shape0,
2920 input0,
2921 shape1,
2922 input1,
2923 shape0,
2924 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002925}
2926
2927LayerTestResult<uint8_t, 4> MaximumUint8Test(
2928 armnn::IWorkloadFactory& workloadFactory,
2929 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2930{
2931 unsigned int shape[] = { 2, 2, 2, 2 };
2932
2933 // See dequantized values to the right.
2934 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2935 3, 3, 3, 3, 4, 4, 4, 4 });
2936
2937 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2938 4, 4, 4, 4, 5, 5, 5, 5 });
2939
2940 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2941 4, 4, 4, 4, 5, 5, 5, 5 });
2942
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002943 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2944 workloadFactory,
2945 memoryManager,
2946 shape,
2947 input0,
2948 shape,
2949 input1,
2950 shape,
2951 output,
2952 1.0f,
2953 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002954}
2955
2956LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2957 armnn::IWorkloadFactory& workloadFactory,
2958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2959{
2960 const unsigned int shape0[] = { 1, 2, 2, 3 };
2961 const unsigned int shape1[] = { 1, 1, 1, 1 };
2962
2963 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2964 7, 8, 9, 10, 11, 12 });
2965
2966 std::vector<uint8_t> input1({2});
2967
2968 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2969 7, 8, 9, 10, 11, 12 });
2970
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002971 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2972 workloadFactory,
2973 memoryManager,
2974 shape0,
2975 input0,
2976 shape1,
2977 input1,
2978 shape0,
2979 output,
2980 1.0f,
2981 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002982}
2983
2984LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2985 armnn::IWorkloadFactory& workloadFactory,
2986 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2987{
2988 const unsigned int shape0[] = { 1, 2, 2, 3 };
2989 const unsigned int shape1[] = { 1, 1, 1, 3 };
2990
2991 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2992 7, 8, 9, 10, 11, 12 });
2993
2994 std::vector<uint8_t> input1({ 1, 10, 3});
2995
2996 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2997 7, 10, 9, 10, 11, 12 });
2998
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002999 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3000 workloadFactory,
3001 memoryManager,
3002 shape0,
3003 input0,
3004 shape1,
3005 input1,
3006 shape0,
3007 output,
3008 1.0f,
3009 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003010}
3011
Sadik Armagan2999a022019-04-09 14:20:12 +01003012LayerTestResult<int16_t, 4> MaximumInt16Test(
3013 armnn::IWorkloadFactory& workloadFactory,
3014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3015{
3016 unsigned int shape[] = { 2, 2, 2, 2 };
3017
3018 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3019 3, 3, 3, 3, 4, 4, 4, 4 });
3020
3021 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3022 4, 4, 4, 4, 5, 5, 5, 5 });
3023
3024 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3025 4, 4, 4, 4, 5, 5, 5, 5 });
3026
3027 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3028 workloadFactory,
3029 memoryManager,
3030 shape,
3031 input0,
3032 shape,
3033 input1,
3034 shape,
3035 output,
3036 1.0f,
3037 0);
3038}
3039
3040LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3041 armnn::IWorkloadFactory& workloadFactory,
3042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3043{
3044 const unsigned int shape0[] = { 1, 2, 2, 3 };
3045 const unsigned int shape1[] = { 1, 1, 1, 1 };
3046
3047 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3048 7, 8, 9, 10, 11, 12 });
3049
3050 std::vector<int16_t> input1({2});
3051
3052 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3053 7, 8, 9, 10, 11, 12 });
3054
3055 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3056 workloadFactory,
3057 memoryManager,
3058 shape0,
3059 input0,
3060 shape1,
3061 input1,
3062 shape0,
3063 output,
3064 1.0f,
3065 0);
3066}
3067
3068LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3069 armnn::IWorkloadFactory& workloadFactory,
3070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3071{
3072 const unsigned int shape0[] = { 1, 2, 2, 3 };
3073 const unsigned int shape1[] = { 1, 1, 1, 3 };
3074
3075 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3076 7, 8, 9, 10, 11, 12 });
3077
3078 std::vector<int16_t> input1({ 1, 10, 3});
3079
3080 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3081 7, 10, 9, 10, 11, 12 });
3082
3083 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3084 workloadFactory,
3085 memoryManager,
3086 shape0,
3087 input0,
3088 shape1,
3089 input1,
3090 shape0,
3091 output,
3092 1.0f,
3093 0);
3094}
3095
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003096LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3097 armnn::IWorkloadFactory& workloadFactory,
3098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3099{
3100 unsigned int shape0[] = { 1, 2, 2, 2 };
3101 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3102
3103 unsigned int shape1[] = { 1, 1, 1, 1 };
3104 std::vector<float> input1({ 2 });
3105
3106 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3107
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003108 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3109 workloadFactory,
3110 memoryManager,
3111 shape0,
3112 input0,
3113 shape1,
3114 input1,
3115 shape0,
3116 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003117}
3118
3119
3120LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3121 armnn::IWorkloadFactory& workloadFactory,
3122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3123{
3124 unsigned int shape0[] = { 1, 2, 2, 2 };
3125 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3126
3127 unsigned int shape1[] = { 1, 1, 1, 1 };
3128 std::vector<float> input1({ 5 });
3129
3130 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3131
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003132 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3133 workloadFactory,
3134 memoryManager,
3135 shape0,
3136 input0,
3137 shape1,
3138 input1,
3139 shape0,
3140 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003141}
3142
3143LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3144 armnn::IWorkloadFactory & workloadFactory,
3145 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3146{
3147 const unsigned int shape0[] = { 1, 2, 2, 3 };
3148 const unsigned int shape1[] = { 1, 1, 1, 3 };
3149
3150 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3151 7, 1, 2, 3, 4, 5 });
3152
3153 std::vector<uint8_t> input1({ 1, 2, 3});
3154
3155 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3156 1, 1, 2, 1, 2, 3 });
3157
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003158 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3159 workloadFactory,
3160 memoryManager,
3161 shape0,
3162 input0,
3163 shape1,
3164 input1,
3165 shape0,
3166 output,
3167 1.0f,
3168 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003169}
3170
Sadik Armagan2999a022019-04-09 14:20:12 +01003171LayerTestResult<int16_t, 4> MinimumInt16Test(
3172 armnn::IWorkloadFactory& workloadFactory,
3173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3174{
3175 unsigned int shape[] = { 2, 2, 2, 2 };
3176
3177 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3178 3, 3, 3, 3, 4, 4, 4, 4 });
3179
3180 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3181 4, 4, 4, 4, 5, 5, 5, 5 });
3182
3183 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3184 3, 3, 3, 3, 4, 4, 4, 4 });
3185
3186 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3187 workloadFactory,
3188 memoryManager,
3189 shape,
3190 input0,
3191 shape,
3192 input1,
3193 shape,
3194 output,
3195 1.0f,
3196 0);
3197}
3198
3199LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3200 armnn::IWorkloadFactory& workloadFactory,
3201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3202{
3203 const unsigned int shape0[] = { 1, 2, 2, 3 };
3204 const unsigned int shape1[] = { 1, 1, 1, 1 };
3205
3206 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3207 7, 8, 9, 10, 11, 12 });
3208
3209 std::vector<int16_t> input1({2});
3210
3211 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3212 2, 2, 2, 2, 2, 2 });
3213
3214 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3215 workloadFactory,
3216 memoryManager,
3217 shape0,
3218 input0,
3219 shape1,
3220 input1,
3221 shape0,
3222 output,
3223 1.0f,
3224 0);
3225}
3226
3227LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3228 armnn::IWorkloadFactory& workloadFactory,
3229 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3230{
3231 const unsigned int shape0[] = { 1, 2, 2, 3 };
3232 const unsigned int shape1[] = { 1, 1, 1, 3 };
3233
3234 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3235 7, 8, 9, 10, 11, 12 });
3236
3237 std::vector<int16_t> input1({ 1, 10, 3});
3238
3239 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3240 1, 8, 3, 1, 10, 3 });
3241
3242 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3243 workloadFactory,
3244 memoryManager,
3245 shape0,
3246 input0,
3247 shape1,
3248 input1,
3249 shape0,
3250 output,
3251 1.0f,
3252 0);
3253}
3254
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003255namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003256LayerTestResult<float,4> MultiplicationTestHelper(
3257 armnn::IWorkloadFactory& workloadFactory,
3258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3259 const unsigned int shape0[4],
3260 const std::vector<float> & values0,
3261 const unsigned int shape1[4],
3262 const std::vector<float> & values1,
3263 const unsigned int outShape[4],
3264 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00003265{
surmeh01bceff2f2018-03-29 16:29:27 +01003266 const size_t dimensionCount = 4;
3267 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
3268 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
3269 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00003270
surmeh01bceff2f2018-03-29 16:29:27 +01003271 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
3272 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00003273
3274 LayerTestResult<float,4> ret(outputTensorInfo);
3275
3276 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3277 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3278 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3279
3280 armnn::MultiplicationQueueDescriptor data;
3281 armnn::WorkloadInfo info;
3282 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3283 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3284 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3285
3286 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3287
3288 inputHandle0->Allocate();
3289 inputHandle1->Allocate();
3290 outputHandle->Allocate();
3291
3292 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3293 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3294
Derek Lambertif30f7d32019-04-09 10:25:02 +01003295 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003296 workload->Execute();
3297
3298 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3299
surmeh01bceff2f2018-03-29 16:29:27 +01003300 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00003301 return ret;
3302}
surmeh01bceff2f2018-03-29 16:29:27 +01003303} // anonymous namespace
3304
3305
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003306LayerTestResult<float,4> MultiplicationTest(
3307 armnn::IWorkloadFactory& workloadFactory,
3308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003309{
3310 const unsigned int width = 2;
3311 const unsigned int height = 2;
3312 const unsigned int channelCount = 2;
3313 const unsigned int batchSize = 2;
3314
3315 unsigned int shape[] = { batchSize, channelCount, height, width };
3316
3317 std::vector<float> input0({
3318 1, 1, 1, 1, 2, 2, 2, 2,
3319 3, 3, 3, 3, 4, 4, 4, 4 });
3320
3321 std::vector<float> input1({
3322 2, 2, 2, 2, 3, 3, 3, 3,
3323 4, 4, 4, 4, 5, 5, 5, 5 });
3324
3325 std::vector<float> output({
3326 2, 2, 2, 2, 6, 6, 6, 6,
3327 12, 12, 12, 12, 20, 20, 20, 20 });
3328
3329 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003330 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003331 shape,
3332 input0,
3333 shape,
3334 input1,
3335 shape,
3336 output);
3337}
3338
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003339LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3340 armnn::IWorkloadFactory& workloadFactory,
3341 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003342{
3343 unsigned int shape0[] = { 1, 2, 2, 2 };
3344 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3345
3346 unsigned int shape1[] = { 1, 1, 1, 1 };
3347 std::vector<float> input1({ 2 });
3348
3349 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3350
3351 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003352 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003353 shape0,
3354 input0,
3355 shape1,
3356 input1,
3357 shape0,
3358 output);
3359}
3360
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003361LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3362 armnn::IWorkloadFactory& workloadFactory,
3363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003364{
3365 unsigned int shape0[] = { 1, 3, 3, 2 };
3366 std::vector<float> input0({
3367 1, 2, 3, 4, 5, 6,
3368 7, 8, 9, 10, 11, 12,
3369 13, 14, 15, 16, 17, 18});
3370
3371 unsigned int shape1[] = { 1, 1, 1, 2 };
3372 std::vector<float> input1({ 1, 2 });
3373
3374 std::vector<float> output({
3375 1, 4, 3, 8, 5, 12,
3376 7, 16, 9, 20, 11, 24,
3377 13, 28, 15, 32, 17, 36});
3378
3379 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003380 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003381 shape0,
3382 input0,
3383 shape1,
3384 input1,
3385 shape0,
3386 output);
3387}
telsoa014fcda012018-03-09 14:13:49 +00003388
// Runs an identical Multiplication workload (with random inputs) on two
// workload factories and returns both outputs in a LayerTestResult so the
// caller can compare them element-wise. `refWorkloadFactory` produces the
// values stored in `outputExpected`.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    // Both inputs and the output share the same NCHW Float32 shape.
    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs reproducible between runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Mirror set of handles created by the reference factory.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor starts as a copy, then has its handles
    // swapped for the reference factory's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Feed the same input data to both workloads.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3458
// Runs an identical BatchNormalization workload (random input and random
// mean/variance/beta/gamma parameters) on two workload factories and returns
// both outputs in a LayerTestResult so the caller can compare them.
// `refWorkloadFactory` produces the values stored in `outputExpected`.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // The per-channel parameter tensors (mean etc.) are 1D of size `channels`.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the random data reproducible; variance uses a
    // non-negative minimum (0.0f) as required for batch norm.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Mirror set of handles created by the reference factory.
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    // The parameter tensors live in ScopedCpuTensorHandles that must outlive
    // both workloads (they are referenced by pointer from the descriptor).
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor shares the parameter tensors but uses the
    // reference factory's input/output handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Feed the same input data to both workloads.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3541
// Runs a Permute workload on `inputData` using the given dimension mappings.
// On success, `outputData` holds the permuted values and `inputTensorInfo`
// is updated in place to describe the permuted shape.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Derive the permuted tensor info from the input info and the mappings.
    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Read back the permuted data and report the permuted shape to the caller.
    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}
3585
Jim Flynn825af452019-05-20 12:49:28 +01003586armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003587 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3588 unsigned int concatDim)
3589{
telsoa014fcda012018-03-09 14:13:49 +00003590 std::vector<armnn::TensorShape> shapes;
3591 shapes.reserve(inputTensorInfos.size());
3592 for (const armnn::TensorInfo& it: inputTensorInfos)
3593 {
3594 shapes.push_back(it.GetShape());
3595 }
surmeh013537c2c2018-05-18 16:31:43 +01003596
Jim Flynn825af452019-05-20 12:49:28 +01003597 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3598 shapes.end(),
3599 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003600}
3601
3602//
narpra015cdda352018-11-19 15:30:27 +00003603// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension
3604// In case of <4 dimensions we need to make sure that the concat dimensions are at least
3605// the 3rd slowest iterating one or the inner most dimension.
surmeh013537c2c2018-05-18 16:31:43 +01003606//
3607
3608bool NeedPermuteForConcat(
3609 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3610 unsigned int concatDim)
3611{
3612 // See note above. Additionally we expect the input shapes to have the
3613 // same number of dimensions.
3614 unsigned int nDimensions = 0;
3615
telsoa01c577f2c2018-08-31 09:22:23 +01003616 // Determine the number of dimensions as well as sanity check them
3617 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003618 for (auto && tensorInfo : inputTensorInfos)
3619 {
3620 if (!nDimensions)
3621 {
3622 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3623 }
3624 else
3625 {
3626 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3627 "Input shapes must have the same number of dimensions");
3628 }
3629 }
3630
narpra015cdda352018-11-19 15:30:27 +00003631 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003632}
3633
3634armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3635{
3636 unsigned int numDims = inputShape.GetNumDimensions();
3637 if (numDims >= 3)
3638 {
3639 // Nothing to do if the inputShape has at least 3 dimensions.
3640 return inputShape;
3641 }
3642
3643 std::vector<unsigned int> newDims(size_t(3), 1u);
3644 unsigned int expandedBy = 3 - numDims;
3645 for (unsigned int i=0; i<numDims; ++i)
3646 {
3647 newDims[expandedBy+i] = inputShape[i];
3648 }
3649 return armnn::TensorShape(3u, &newDims[0]);
3650}
3651
3652void Generate3dPermuteVectorForConcat(
3653 unsigned int numDimensions,
3654 unsigned int & concatDim,
3655 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3656{
3657 BOOST_ASSERT_MSG(numDimensions <= 3,
3658 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003659 unsigned int expandedBy = 3 - numDimensions;
3660 unsigned int expandedConcatAxis = concatDim + expandedBy;
3661
3662 if (expandedConcatAxis == 2)
3663 {
3664 concatDim = 0;
3665 armnn::PermutationVector forwardPermutation({1, 2, 0});
3666 armnn::PermutationVector reversePermutation({2, 0, 1});
3667 permutations = std::make_pair(forwardPermutation, reversePermutation);
3668 }
3669 else if (expandedConcatAxis == 1)
3670 {
3671 concatDim = 0;
3672 armnn::PermutationVector forwardPermutation({2, 0, 1});
3673 armnn::PermutationVector reversePermutation({1, 2, 0});
3674 permutations = std::make_pair(forwardPermutation, reversePermutation);
3675 }
3676 else
3677 {
3678 BOOST_ASSERT(expandedConcatAxis == 0);
3679 concatDim = 0;
3680 }
3681}
3682
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    // One storage vector per input; the permuted copies live here so the
    // raw pointers in inputData stay valid for the caller.
    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutations (and new concatDim) once;
            // all subsequent inputs must share the same rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3d, then permute the data into this input's storage slot.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // Report the shape the permuted, concatenated output will have.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3751
3752
3753//
3754// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003755// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003756// output.
3757//
3758template <typename T>
3759void PermuteOutputForConcat(
3760 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003762 const armnn::TensorInfo & tensorInfo,
3763 const armnn::PermutationVector & permuteVector,
3764 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
3765 T * data)
3766{
3767 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
3768 if (data == nullptr)
3769 {
3770 // Nullptr is an error in the test. By returning without doing the permutation
3771 // I expect the caller to fail the test. It still makes sense to report this as
3772 // an assert for Debug builds.
3773 return;
3774 }
3775
3776 armnn::TensorInfo resultTensorInfo = tensorInfo;
3777 std::vector<T> inputData(tensorInfo.GetNumElements());
3778 std::vector<T> outputData;
3779
3780 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
3781
3782 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003783 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003784 permuteVector,
3785 resultTensorInfo,
3786 &inputData[0],
3787 outputData);
3788
3789 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
3790}
3791
3792template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003793void Concatenate(
3794 armnn::IWorkloadFactory& workloadFactory,
3795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3796 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3797 std::initializer_list<T *> inputsOrig,
3798 const armnn::TensorInfo& outputTensorInfoOrig,
3799 T * output,
narpra015cdda352018-11-19 15:30:27 +00003800 unsigned int concatDim,
3801 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003802{
3803 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3804 if (output == nullptr)
3805 {
3806 // Nullptr is an error in the test. By returning without doing the permutation
3807 // I expect the caller to fail the test. It still makes sense to report this as
3808 // an assert for Debug builds.
3809 return;
3810 }
3811
telsoa01c577f2c2018-08-31 09:22:23 +01003812 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003813 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3814 std::vector<T *> inputs = inputsOrig;
3815 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3816
3817 armnn::PermutationVector permuteVector{0, 1, 2};
3818
telsoa01c577f2c2018-08-31 09:22:23 +01003819 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003820 std::vector<std::vector<T>> tmpInputDataStorage;
3821
3822 const size_t inputCount = inputTensorInfos.size();
3823
3824 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3825
3826 if (needPermuteForConcat)
3827 {
3828 //
3829 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003830 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003831 //
3832 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003833 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003834 inputTensorInfos,
3835 inputs,
3836 tmpInputDataStorage,
3837 permuteVector,
3838 concatDim,
3839 outputTensorInfo);
3840 }
3841
narpra015cdda352018-11-19 15:30:27 +00003842 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003843
3844 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3845 inputHandles.reserve(inputCount);
3846
narpra015cdda352018-11-19 15:30:27 +00003847 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3848
Jim Flynne242f2d2019-05-22 14:24:13 +01003849 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01003850 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00003851 queueDescriptor.m_Parameters = viewsDescriptor;
3852
3853 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003854 {
narpra015cdda352018-11-19 15:30:27 +00003855 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3856 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3857 {
3858 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3859 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3860 }
telsoa014fcda012018-03-09 14:13:49 +00003861
narpra015cdda352018-11-19 15:30:27 +00003862 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003863
narpra015cdda352018-11-19 15:30:27 +00003864 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3865 for (unsigned int i = 0; i < inputCount; ++i)
3866 {
3867 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3868 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3869 subTensorsSupported ?
3870 workloadFactory.CreateSubTensorHandle(*outputHandle,
3871 inputTensorInfo.GetShape(),
3872 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3873 workloadFactory.CreateTensorHandle(inputTensorInfo);
3874
3875 inputHandles.emplace_back(std::move(inputHandle));
3876 }
3877
telsoa014fcda012018-03-09 14:13:49 +00003878 }
narpra015cdda352018-11-19 15:30:27 +00003879 else
3880 {
3881 for (unsigned int i = 0; i < inputCount; ++i)
3882 {
3883 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3884 inputHandles.emplace_back(std::move(inputHandle));
3885 }
3886 }
telsoa014fcda012018-03-09 14:13:49 +00003887
3888 for (unsigned int i = 0; i < inputCount; ++i)
3889 {
surmeh013537c2c2018-05-18 16:31:43 +01003890 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003891 }
3892
3893 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3894
Jim Flynn4ed6c832019-05-20 11:02:46 +01003895 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00003896
3897 for (auto& inputHandle : inputHandles)
3898 {
3899 inputHandle->Allocate();
3900 }
3901
3902 outputHandle->Allocate();
3903
3904 unsigned int nextInputId = 0;
3905 for (auto& inputHandle : inputHandles)
3906 {
surmeh013537c2c2018-05-18 16:31:43 +01003907 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3908 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003909 }
3910
Derek Lambertif30f7d32019-04-09 10:25:02 +01003911 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003912 workload->Execute();
3913
surmeh013537c2c2018-05-18 16:31:43 +01003914 if (needPermuteForConcat)
3915 {
3916 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003917 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003918 outputTensorInfo,
3919 permuteVector,
3920 std::move(outputHandle),
3921 output);
3922 }
3923 else
3924 {
3925 CopyDataFromITensorHandle(output, outputHandle.get());
3926 }
telsoa014fcda012018-03-09 14:13:49 +00003927}
3928
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003929template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003930LayerTestResult<T, 1> Concatenation1dTestImpl(
3931 armnn::IWorkloadFactory& workloadFactory,
3932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3933 float qScale,
3934 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003935{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003936 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003937
3938 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3939 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3940 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3941
Jim Flynncbb66aa2019-05-15 13:03:54 +01003942 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003943
3944 LayerTestResult<T, 1> result(outputTensorInfo);
3945
3946 std::vector<T> output;
3947 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003948 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003949 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3950 { input0.data(), input1.data(), input2.data() },
3951 outputTensorInfo,
3952 output.data(),
3953 0,
3954 true);
telsoa014fcda012018-03-09 14:13:49 +00003955
3956 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3957 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3958 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3959 }));
3960
3961 return result;
3962}
3963
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003964LayerTestResult<float, 1> Concatenation1dTest(
3965 armnn::IWorkloadFactory& workloadFactory,
3966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003967{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003968 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003969}
3970
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003971template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003972LayerTestResult<T, 2> Concatenation2dTestImpl(
3973 armnn::IWorkloadFactory& workloadFactory,
3974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003975 const armnn::TensorInfo& outputTensorInfo,
3976 unsigned int dimension,
3977 const float qScale,
3978 const int32_t qOffset)
3979{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003980 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003981
3982 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3983 // Batch 0
3984 1.0f, 2.0f, 3.0f,
3985
3986 // Batch 1
3987 10.0f, 11.0f, 12.0f,
3988 }));
3989
3990 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3991 // Batch 0
3992 4.0f, 5.0f, 6.0f,
3993
3994 // Batch 1
3995 13.0f, 14.0f, 15.0f,
3996 }));
3997
3998 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3999 // Batch 0
4000 7.0f, 8.0f, 9.0f,
4001
4002 // Batch 1
4003 16.0f, 17.0f, 18.0f,
4004 }));
4005
4006 LayerTestResult<T, 2> result(outputTensorInfo);
4007
4008 std::vector<T> output;
4009 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004010 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004011 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4012 { input0.data(), input1.data(), input2.data() },
4013 outputTensorInfo,
4014 output.data(),
4015 dimension,
4016 true);
telsoa014fcda012018-03-09 14:13:49 +00004017
4018 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4019 return result;
4020}
4021
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004022template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004023LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4024 armnn::IWorkloadFactory& workloadFactory,
4025 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4026 float qScale,
4027 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004028{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004029 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004030
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004031 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4032 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4033
telsoa014fcda012018-03-09 14:13:49 +00004034 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4035 // Batch 0
4036 1.0f, 2.0f, 3.0f,
4037
4038 // Batch 1
4039 10.0f, 11.0f, 12.0f,
4040
4041 // Batch 2
4042 4.0f, 5.0f, 6.0f,
4043
4044 // Batch 3
4045 13.0f, 14.0f, 15.0f,
4046
4047 // Batch 4
4048 7.0f, 8.0f, 9.0f,
4049
4050 // Batch 5
4051 16.0f, 17.0f, 18.0f,
4052 }));
4053
4054 return result;
4055}
4056
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004057LayerTestResult<float, 2> Concatenation2dDim0Test(
4058 armnn::IWorkloadFactory& workloadFactory,
4059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004060{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004061 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004062}
4063
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004064template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004065LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4066 armnn::IWorkloadFactory& workloadFactory,
4067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4068 float qScale,
4069 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004070{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004071 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004072
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004073 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4074 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4075
telsoa014fcda012018-03-09 14:13:49 +00004076 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4077 // Batch 0
4078 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4079
4080 // Batch 1
4081 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4082 }));
4083
4084 return result;
4085}
4086
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004087LayerTestResult<float, 2> Concatenation2dDim1Test(
4088 armnn::IWorkloadFactory& workloadFactory,
4089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004090{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004091 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004092}
4093
// Concatenates inputs with different batch counts (2, 3 and 1 rows of three
// elements) along dimension 0, producing a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,       // concatenation dimension
                   true);   // use sub-tensors

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4164
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004165LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
4166 armnn::IWorkloadFactory& workloadFactory,
4167 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004168{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004169 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4170 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004171}
4172
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004173template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004174LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
4175 armnn::IWorkloadFactory& workloadFactory,
4176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4177 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004178 int32_t qOffset)
4179{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004180 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004181 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4182 // Batch 0
4183 1.0f, 2.0f, 3.0f,
4184
4185 // Batch 1
4186 10.0f, 11.0f, 12.0f,
4187 }));
4188
Jim Flynncbb66aa2019-05-15 13:03:54 +01004189 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004190 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4191 // Batch 0
4192 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
4193
4194 // Batch 1
4195 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
4196 }));
4197
Jim Flynncbb66aa2019-05-15 13:03:54 +01004198 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004199 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4200 // Batch 0
4201 9.0f,
4202
4203 // Batch 1
4204 18.0f
4205 }));
4206
Jim Flynncbb66aa2019-05-15 13:03:54 +01004207 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004208 LayerTestResult<T, 2> result(outputTensorInfo);
4209
4210 std::vector<T> output;
4211 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004212 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004213 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4214 { input0.data(), input1.data(), input2.data() },
4215 outputTensorInfo,
4216 output.data(),
4217 1,
4218 true);
telsoa014fcda012018-03-09 14:13:49 +00004219
4220 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4221 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4222 // Batch 0
4223 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4224
4225 // Batch 1
4226 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
4227 }));
4228
4229 return result;
4230}
4231
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004232LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4233 armnn::IWorkloadFactory& workloadFactory,
4234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004235{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004236 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4237 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004238}
4239
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004240template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004241LayerTestResult<T, 3> Concatenation3dTestImpl(
4242 armnn::IWorkloadFactory& workloadFactory,
4243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004244 const armnn::TensorInfo& outputTensorInfo,
4245 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00004246 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00004247 float qScale,
4248 int32_t qOffset)
4249{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004250 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004251
4252 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4253 // Batch 0, Channel 0
4254 1.0f, 2.0f,
4255
4256 // Batch 0, Channel 1
4257 3.0f, 4.0f,
4258
4259 // Batch 0, Channel 2
4260 5.0f, 6.0f,
4261
4262 // Batch 1, Channel 0
4263 19.0f, 20.0f,
4264
4265 // Batch 1, Channel 1
4266 21.0f, 22.0f,
4267
4268 // Batch 1, Channel 2
4269 23.0f, 24.0f
4270 }));
4271
4272 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4273 // Batch 0, Channel 0
4274 7.0f, 8.0f,
4275
4276 // Batch 0, Channel 1
4277 9.0f, 10.0f,
4278
4279 // Batch 0, Channel 2
4280 11.0f, 12.0f,
4281
4282 // Batch 1, Channel 0
4283 25.0f, 26.0f,
4284
4285 // Batch 1, Channel 1
4286 27.0f, 28.0f,
4287
4288 // Batch 1, Channel 2
4289 29.0f, 30.0f
4290 }));
4291
4292 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4293 // Batch 0, Channel 0
4294 13.0f, 14.0f,
4295
4296 // Batch 0, Channel 1
4297 15.0f, 16.0f,
4298
4299 // Batch 0, Channel 2
4300 17.0f, 18.0f,
4301
4302 // Batch 1, Channel 0
4303 31.0f, 32.0f,
4304
4305 // Batch 1, Channel 1
4306 33.0f, 34.0f,
4307
4308 // Batch 1, Channel 2
4309 35.0f, 36.0f
4310 }));
4311
4312 LayerTestResult<T, 3> result(outputTensorInfo);
4313
4314 std::vector<T> output;
4315 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004316 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004317 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4318 { input0.data(), input1.data(), input2.data() },
4319 outputTensorInfo,
4320 output.data(),
4321 dimension,
4322 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004323
4324 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4325 return result;
4326}
4327
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004328template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004329LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
4330 armnn::IWorkloadFactory& workloadFactory,
4331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4332 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004333 int32_t qOffset)
4334{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004335 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004336
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004337 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4338 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4339
telsoa014fcda012018-03-09 14:13:49 +00004340 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4341 // Batch 0, Channel 0
4342 1.0f, 2.0f,
4343
4344 // Batch 0, Channel 1
4345 3.0f, 4.0f,
4346
4347 // Batch 0, Channel 2
4348 5.0f, 6.0f,
4349
4350 // Batch 1, Channel 0
4351 19.0f, 20.0f,
4352
4353 // Batch 1, Channel 1
4354 21.0f, 22.0f,
4355
4356 // Batch 1, Channel 2
4357 23.0f, 24.0f,
4358
4359 // Batch 2, Channel 0
4360 7.0f, 8.0f,
4361
4362 // Batch 2, Channel 1
4363 9.0f, 10.0f,
4364
4365 // Batch 2, Channel 2
4366 11.0f, 12.0f,
4367
4368 // Batch 3, Channel 0
4369 25.0f, 26.0f,
4370
4371 // Batch 3, Channel 1
4372 27.0f, 28.0f,
4373
4374 // Batch 3, Channel 2
4375 29.0f, 30.0f,
4376
4377 // Batch 4, Channel 0
4378 13.0f, 14.0f,
4379
4380 // Batch 4, Channel 1
4381 15.0f, 16.0f,
4382
4383 // Batch 4, Channel 2
4384 17.0f, 18.0f,
4385
4386 // Batch 5, Channel 0
4387 31.0f, 32.0f,
4388
4389 // Batch 5, Channel 1
4390 33.0f, 34.0f,
4391
4392 // Batch 5, Channel 2
4393 35.0f, 36.0f
4394 }));
narpra015cdda352018-11-19 15:30:27 +00004395
telsoa014fcda012018-03-09 14:13:49 +00004396 return result;
4397}
4398
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004399LayerTestResult<float, 3> Concatenation3dDim0Test(
4400 armnn::IWorkloadFactory& workloadFactory,
4401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004402{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004403 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004404}
4405
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004406template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004407LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
4408 armnn::IWorkloadFactory& workloadFactory,
4409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4410 float qScale,
4411 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004412{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004413 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004414
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004415 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4416 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004417
telsoa014fcda012018-03-09 14:13:49 +00004418 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4419 // Batch 0, Channel 0
4420 1.0f, 2.0f,
4421
4422 // Batch 0, Channel 1
4423 3.0f, 4.0f,
4424
4425 // Batch 0, Channel 2
4426 5.0f, 6.0f,
4427
4428 // Batch 0, Channel 3
4429 7.0f, 8.0f,
4430
4431 // Batch 0, Channel 4
4432 9.0f, 10.0f,
4433
4434 // Batch 0, Channel 5
4435 11.0f, 12.0f,
4436
4437 // Batch 0, Channel 6
4438 13.0f, 14.0f,
4439
4440 // Batch 0, Channel 7
4441 15.0f, 16.0f,
4442
4443 // Batch 0, Channel 8
4444 17.0f, 18.0f,
4445
4446 // Batch 1, Channel 0
4447 19.0f, 20.0f,
4448
4449 // Batch 1, Channel 1
4450 21.0f, 22.0f,
4451
4452 // Batch 1, Channel 2
4453 23.0f, 24.0f,
4454
4455 // Batch 1, Channel 3
4456 25.0f, 26.0f,
4457
4458 // Batch 1, Channel 4
4459 27.0f, 28.0f,
4460
4461 // Batch 1, Channel 5
4462 29.0f, 30.0f,
4463
4464 // Batch 1, Channel 6
4465 31.0f, 32.0f,
4466
4467 // Batch 1, Channel 7
4468 33.0f, 34.0f,
4469
4470 // Batch 1, Channel 8
4471 35.0f, 36.0f
4472 }));
4473
4474 return result;
4475}
4476
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004477LayerTestResult<float, 3> Concatenation3dDim1Test(
4478 armnn::IWorkloadFactory& workloadFactory,
4479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004480{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004481 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004482}
4483
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004484template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004485LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
4486 armnn::IWorkloadFactory& workloadFactory,
4487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004488 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004489 float qScale,
4490 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004491{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004492 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004493
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004494 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4495 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004496
telsoa014fcda012018-03-09 14:13:49 +00004497 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4498 // Batch 0, Channel 0
4499 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
4500
4501 // Batch 0, Channel 1
4502 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
4503
4504 // Batch 0, Channel 2
4505 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
4506
4507 // Batch 1, Channel 0
4508 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
4509
4510 // Batch 1, Channel 1
4511 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
4512
4513 // Batch 1, Channel 2
4514 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
4515 }));
4516
4517 return result;
4518}
4519
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004520LayerTestResult<float, 3> Concatenation3dDim2Test(
4521 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4523 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004524{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004525 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4526 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004527}
4528
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004529template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004530LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4531 armnn::IWorkloadFactory& workloadFactory,
4532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4533 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004534 int32_t qOffset)
4535{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004536 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004537 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4538 // Batch 0, Channel 0
4539 1.0f, 2.0f,
4540
4541 // Batch 0, Channel 1
4542 3.0f, 4.0f,
4543
4544 // Batch 0, Channel 2
4545 5.0f, 6.0f,
4546
4547 // Batch 1, Channel 0
4548 19.0f, 20.0f,
4549
4550 // Batch 1, Channel 1
4551 21.0f, 22.0f,
4552
4553 // Batch 1, Channel 2
4554 23.0f, 24.0f
4555 }));
4556
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004557 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004558 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4559 // Batch 0, Channel 0
4560 7.0f, 8.0f,
4561
4562 // Batch 0, Channel 1
4563 9.0f, 10.0f,
4564
4565 // Batch 0, Channel 2
4566 11.0f, 12.0f,
4567 }));
4568
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004569 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004570 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4571 // Batch 0, Channel 0
4572 25.0f, 26.0f,
4573
4574 // Batch 0, Channel 1
4575 27.0f, 28.0f,
4576
4577 // Batch 0, Channel 2
4578 29.0f, 30.0f,
4579
4580 // Batch 1, Channel 0
4581 13.0f, 14.0f,
4582
4583 // Batch 1, Channel 1
4584 15.0f, 16.0f,
4585
4586 // Batch 1, Channel 2
4587 17.0f, 18.0f,
4588
4589 // Batch 2, Channel 0
4590 31.0f, 32.0f,
4591
4592 // Batch 2, Channel 1
4593 33.0f, 34.0f,
4594
4595 // Batch 2, Channel 2
4596 35.0f, 36.0f
4597 }));
4598
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004599 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004600 LayerTestResult<T, 3> result(outputTensorInfo);
4601
4602 std::vector<T> output;
4603 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004604 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004605 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4606 { input0.data(), input1.data(), input2.data() },
4607 outputTensorInfo,
4608 output.data(),
4609 0,
4610 true);
telsoa014fcda012018-03-09 14:13:49 +00004611
4612 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4613 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4614 // Batch 0, Channel 0
4615 1.0f, 2.0f,
4616
4617 // Batch 0, Channel 1
4618 3.0f, 4.0f,
4619
4620 // Batch 0, Channel 2
4621 5.0f, 6.0f,
4622
4623 // Batch 1, Channel 0
4624 19.0f, 20.0f,
4625
4626 // Batch 1, Channel 1
4627 21.0f, 22.0f,
4628
4629 // Batch 1, Channel 2
4630 23.0f, 24.0f,
4631
4632 // Batch 2, Channel 0
4633 7.0f, 8.0f,
4634
4635 // Batch 2, Channel 1
4636 9.0f, 10.0f,
4637
4638 // Batch 2, Channel 2
4639 11.0f, 12.0f,
4640
4641 // Batch 3, Channel 0
4642 25.0f, 26.0f,
4643
4644 // Batch 3, Channel 1
4645 27.0f, 28.0f,
4646
4647 // Batch 3, Channel 2
4648 29.0f, 30.0f,
4649
4650 // Batch 4, Channel 0
4651 13.0f, 14.0f,
4652
4653 // Batch 4, Channel 1
4654 15.0f, 16.0f,
4655
4656 // Batch 4, Channel 2
4657 17.0f, 18.0f,
4658
4659 // Batch 5, Channel 0
4660 31.0f, 32.0f,
4661
4662 // Batch 5, Channel 1
4663 33.0f, 34.0f,
4664
4665 // Batch 5, Channel 2
4666 35.0f, 36.0f
4667 }));
4668
4669 return result;
4670}
4671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004672LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4673 armnn::IWorkloadFactory& workloadFactory,
4674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004675{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004676 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4677 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004678}
4679
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004680template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004681LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
4682 armnn::IWorkloadFactory& workloadFactory,
4683 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4684 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004685 int32_t qOffset)
4686{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004687 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004688 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4689 // Batch 0, Channel 0
4690 1.0f, 2.0f,
4691
4692 // Batch 0, Channel 1
4693 3.0f, 4.0f,
4694
4695 // Batch 0, Channel 2
4696 5.0f, 6.0f,
4697
4698 // Batch 1, Channel 0
4699 19.0f, 20.0f,
4700
4701 // Batch 1, Channel 1
4702 21.0f, 22.0f,
4703
4704 // Batch 1, Channel 2
4705 23.0f, 24.0f
4706 }));
4707
Jim Flynncbb66aa2019-05-15 13:03:54 +01004708 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004709 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4710 // Batch 0, Channel 0
4711 7.0f, 8.0f,
4712
4713 // Batch 0, Channel 1
4714 9.0f, 10.0f,
4715
4716 // Batch 0, Channel 2
4717 11.0f, 12.0f,
4718
4719 // Batch 0, Channel 3
4720 25.0f, 26.0f,
4721
4722 // Batch 1, Channel 0
4723 27.0f, 28.0f,
4724
4725 // Batch 1, Channel 1
4726 29.0f, 30.0f,
4727
4728 // Batch 1, Channel 2
4729 13.0f, 14.0f,
4730
4731 // Batch 1, Channel 3
4732 15.0f, 16.0f,
4733 }));
4734
Jim Flynncbb66aa2019-05-15 13:03:54 +01004735 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004736 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4737 // Batch 0, Channel 0
4738 17.0f, 18.0f,
4739
4740 // Batch 1, Channel 0
4741 31.0f, 32.0f,
4742 }));
4743
Jim Flynncbb66aa2019-05-15 13:03:54 +01004744 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004745 LayerTestResult<T, 3> result(outputTensorInfo);
4746
4747 std::vector<T> output;
4748 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004749 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004750 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4751 { input0.data(), input1.data(), input2.data() },
4752 outputTensorInfo,
4753 output.data(),
4754 1,
4755 true);
telsoa014fcda012018-03-09 14:13:49 +00004756
4757 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4758 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4759 // Batch 0, Channel 0
4760 1.0f, 2.0f,
4761
4762 // Batch 0, Channel 1
4763 3.0f, 4.0f,
4764
4765 // Batch 0, Channel 2
4766 5.0f, 6.0f,
4767
4768 // Batch 0, Channel 3
4769 7.0f, 8.0f,
4770
4771 // Batch 0, Channel 4
4772 9.0f, 10.0f,
4773
4774 // Batch 0, Channel 5
4775 11.0f, 12.0f,
4776
4777 // Batch 0, Channel 6
4778 25.0f, 26.0f,
4779
4780 // Batch 0, Channel 7
4781 17.0f, 18.0f,
4782
4783 // Batch 1, Channel 0
4784 19.0f, 20.0f,
4785
4786 // Batch 1, Channel 1
4787 21.0f, 22.0f,
4788
4789 // Batch 1, Channel 2
4790 23.0f, 24.0f,
4791
4792 // Batch 1, Channel 3
4793 27.0f, 28.0f,
4794
4795 // Batch 1, Channel 4
4796 29.0f, 30.0f,
4797
4798 // Batch 1, Channel 5
4799 13.0f, 14.0f,
4800
4801 // Batch 1, Channel 6
4802 15.0f, 16.0f,
4803
4804 // Batch 1, Channel 7
4805 31.0f, 32.0f,
4806 }));
4807
4808 return result;
4809}
4810
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004811LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4812 armnn::IWorkloadFactory& workloadFactory,
4813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004814{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004815 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4816 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004817}
4818
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004819template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004820LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
4821 armnn::IWorkloadFactory& workloadFactory,
4822 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004823 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004824 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004825 int32_t qOffset)
4826{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004827 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004828 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4829 // Batch 0, Channel 0
4830 1.0f, 2.0f,
4831
4832 // Batch 0, Channel 1
4833 3.0f, 4.0f,
4834
4835 // Batch 0, Channel 2
4836 5.0f, 6.0f,
4837
4838 // Batch 1, Channel 0
4839 19.0f, 20.0f,
4840
4841 // Batch 1, Channel 1
4842 21.0f, 22.0f,
4843
4844 // Batch 1, Channel 2
4845 23.0f, 24.0f
4846 }));
4847
Jim Flynncbb66aa2019-05-15 13:03:54 +01004848 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004849 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4850 // Batch 0, Channel 0
4851 7.0f,
4852
4853 // Batch 0, Channel 1
4854 9.0f,
4855
4856 // Batch 0, Channel 2
4857 11.0f,
4858
4859 // Batch 1, Channel 0
4860 25.0f,
4861
4862 // Batch 1, Channel 1
4863 27.0f,
4864
4865 // Batch 1, Channel 2
4866 29.0f
4867 }));
4868
Jim Flynncbb66aa2019-05-15 13:03:54 +01004869 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004870 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4871 // Batch 0, Channel 0
4872 13.0f, 14.0f, 50.0f,
4873
4874 // Batch 0, Channel 1
4875 15.0f, 16.0f, 51.0f,
4876
4877 // Batch 0, Channel 2
4878 17.0f, 18.0f, 52.0f,
4879
4880 // Batch 1, Channel 0
4881 31.0f, 32.0f, 53.0f,
4882
4883 // Batch 1, Channel 1
4884 33.0f, 34.0f, 54.0f,
4885
4886 // Batch 1, Channel 2
4887 35.0f, 36.0f, 55.0f,
4888 }));
4889
Jim Flynncbb66aa2019-05-15 13:03:54 +01004890 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004891 LayerTestResult<T, 3> result(outputTensorInfo);
4892
4893 std::vector<T> output;
4894 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004895 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004896 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4897 { input0.data(), input1.data(), input2.data() },
4898 outputTensorInfo,
4899 output.data(),
4900 2,
4901 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004902
4903 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4904 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4905 // Batch 0, Channel 0
4906 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
4907
4908 // Batch 0, Channel 1
4909 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
4910
4911 // Batch 0, Channel 2
4912 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
4913
4914 // Batch 1, Channel 0
4915 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
4916
4917 // Batch 1, Channel 1
4918 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
4919
4920 // Batch 1, Channel 2
4921 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
4922 }));
4923
4924 return result;
4925}
4926
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004927LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4928 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004929 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4930 bool useSubtensor)
4931{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004932 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4933 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004934}
4935
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004936template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004937LayerTestResult<T, 4> Concatenation4dTestImpl(
4938 armnn::IWorkloadFactory& workloadFactory,
4939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4940 const armnn::TensorInfo& outputTensorInfo,
4941 unsigned int dimension,
4942 bool useSubtensor,
4943 float qScale,
4944 int32_t qOffset)
4945{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004946 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004947
4948 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4949 1.0f, 2.0f,
4950 3.0f, 4.0f,
4951 5.0f, 6.0f,
4952 7.0f, 8.0f,
4953 9.0f, 10.0f,
4954 11.0f, 12.0f
4955 }));
4956
4957 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4958 11.0f, 12.0f,
4959 13.0f, 14.0f,
4960 15.0f, 16.0f,
4961 17.0f, 18.0f,
4962 19.0f, 20.0f,
4963 21.0f, 22.0f
4964 }));
4965
4966 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4967 21.0f, 22.0f,
4968 23.0f, 24.0f,
4969 25.0f, 26.0f,
4970 27.0f, 28.0f,
4971 29.0f, 30.0f,
4972 31.0f, 32.0f
4973 }));
4974
4975 LayerTestResult<T, 4> result(outputTensorInfo);
4976
4977 std::vector<T> output;
4978 output.resize(outputTensorInfo.GetNumElements());
4979
4980 Concatenate<T>(workloadFactory,
4981 memoryManager,
4982 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4983 {input0.data(), input1.data(), input2.data()},
4984 outputTensorInfo,
4985 output.data(),
4986 dimension,
4987 useSubtensor);
4988
4989 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4990 return result;
4991}
4992
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004993template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004994LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4995 armnn::IWorkloadFactory& workloadFactory,
4996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4997 float qScale,
4998 int32_t qOffset)
4999{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005000 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005001
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005002 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5003 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5004
narpra015cdda352018-11-19 15:30:27 +00005005 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5006 1.0f, 2.0f,
5007 3.0f, 4.0f,
5008 5.0f, 6.0f,
5009 7.0f, 8.0f,
5010 9.0f, 10.0f,
5011 11.0f, 12.0f,
5012
5013 11.0f, 12.0f,
5014 13.0f, 14.0f,
5015 15.0f, 16.0f,
5016 17.0f, 18.0f,
5017 19.0f, 20.0f,
5018 21.0f, 22.0f,
5019
5020 21.0f, 22.0f,
5021 23.0f, 24.0f,
5022 25.0f, 26.0f,
5023 27.0f, 28.0f,
5024 29.0f, 30.0f,
5025 31.0f, 32.0f
5026 }));
5027 return result;
5028}
5029
5030LayerTestResult<float, 4> Concatenation4dDim0Test(
5031 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005032 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005033{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005034 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005035}
5036
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005037template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005038LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
5039 armnn::IWorkloadFactory& workloadFactory,
5040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5041 float qScale,
5042 int32_t qOffset)
5043{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005044 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005045
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005046 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5047 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5048
narpra015cdda352018-11-19 15:30:27 +00005049 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5050 1.0f, 2.0f,
5051 3.0f, 4.0f,
5052 5.0f, 6.0f,
5053 7.0f, 8.0f,
5054 9.0f, 10.0f,
5055 11.0f, 12.0f,
5056
5057 11.0f, 12.0f,
5058 13.0f, 14.0f,
5059 15.0f, 16.0f,
5060 17.0f, 18.0f,
5061 19.0f, 20.0f,
5062 21.0f, 22.0f,
5063
5064 21.0f, 22.0f,
5065 23.0f, 24.0f,
5066 25.0f, 26.0f,
5067 27.0f, 28.0f,
5068 29.0f, 30.0f,
5069 31.0f, 32.0f
5070 }));
5071
5072 return result;
5073}
5074
5075LayerTestResult<float, 4> Concatenation4dDim1Test(
5076 armnn::IWorkloadFactory& workloadFactory,
5077 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5078{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005079 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005080}
5081
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005082template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005083LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
5084 armnn::IWorkloadFactory& workloadFactory,
5085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5086 float qScale,
5087 int32_t qOffset)
5088{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005089 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005090
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005091 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5092 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
5093
narpra015cdda352018-11-19 15:30:27 +00005094 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5095 1.0f, 2.0f,
5096 3.0f, 4.0f,
5097 11.0f, 12.0f,
5098 13.0f, 14.0f,
5099 21.0f, 22.0f,
5100 23.0f, 24.0f,
5101
5102 5.0f, 6.0f,
5103 7.0f, 8.0f,
5104 15.0f, 16.0f,
5105 17.0f, 18.0f,
5106 25.0f, 26.0f,
5107 27.0f, 28.0f,
5108
5109 9.0f, 10.0f,
5110 11.0f, 12.0f,
5111 19.0f, 20.0f,
5112 21.0f, 22.0f,
5113 29.0f, 30.0f,
5114 31.0f, 32.0f
5115 }));
5116
5117 return result;
5118}
5119
5120LayerTestResult<float, 4> Concatenation4dDim2Test(
5121 armnn::IWorkloadFactory& workloadFactory,
5122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5123{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005124 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005125}
5126
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005127template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005128LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
5129 armnn::IWorkloadFactory& workloadFactory,
5130 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5131 float qScale,
5132 int32_t qOffset,
5133 bool useSubtensor)
5134{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005135 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005136
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005137 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5138 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
5139
narpra015cdda352018-11-19 15:30:27 +00005140 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5141 1.0f, 2.0f,
5142 11.0f, 12.0f,
5143 21.0f, 22.0f,
5144 3.0f, 4.0f,
5145 13.0f, 14.0f,
5146 23.0f, 24.0f,
5147
5148 5.0f, 6.0f,
5149 15.0f, 16.0f,
5150 25.0f, 26.0f,
5151 7.0f, 8.0f,
5152 17.0f, 18.0f,
5153 27.0f, 28.0f,
5154
5155 9.0f, 10.0f,
5156 19.0f, 20.0f,
5157 29.0f, 30.0f,
5158 11.0f, 12.0f,
5159 21.0f, 22.0f,
5160 31.0f, 32.0f
5161 }));
5162
5163 return result;
5164}
5165
5166LayerTestResult<float, 4> Concatenation4dDim3Test(
5167 armnn::IWorkloadFactory& workloadFactory,
5168 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5169 bool useSubtensor)
5170{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005171 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5172 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005173}
5174
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005175template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005176LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
5177 armnn::IWorkloadFactory& workloadFactory,
5178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5179 float qScale,
5180 int32_t qOffset)
5181{
5182 unsigned int dimension = 0;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005183 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005184
5185 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5186 1.0f, 2.0f,
5187 3.0f, 4.0f,
5188 5.0f, 6.0f,
5189 7.0f, 8.0f,
5190 9.0f, 10.0f,
5191 11.0f, 12.0f
5192 }));
5193
Jim Flynncbb66aa2019-05-15 13:03:54 +01005194 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005195
5196 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5197 11.0f, 12.0f,
5198 13.0f, 14.0f,
5199 15.0f, 16.0f,
5200 17.0f, 18.0f,
5201 19.0f, 20.0f,
5202 21.0f, 22.0f,
5203
5204 21.0f, 22.0f,
5205 23.0f, 24.0f,
5206 25.0f, 26.0f,
5207 27.0f, 28.0f,
5208 29.0f, 30.0f,
5209 31.0f, 32.0f
5210
5211 }));
5212
Jim Flynncbb66aa2019-05-15 13:03:54 +01005213 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005214
5215 LayerTestResult<T, 4> result(outputTensorInfo);
5216
5217 std::vector<T> output;
5218 output.resize(outputTensorInfo.GetNumElements());
5219 Concatenate<T>(workloadFactory,
5220 memoryManager,
5221 {inputTensorInfo0, inputTensorInfo1},
5222 {input0.data(), input1.data()},
5223 outputTensorInfo,
5224 output.data(),
5225 dimension,
5226 true);
5227
5228 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5229 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5230 1.0f, 2.0f,
5231 3.0f, 4.0f,
5232 5.0f, 6.0f,
5233 7.0f, 8.0f,
5234 9.0f, 10.0f,
5235 11.0f, 12.0f,
5236
5237 11.0f, 12.0f,
5238 13.0f, 14.0f,
5239 15.0f, 16.0f,
5240 17.0f, 18.0f,
5241 19.0f, 20.0f,
5242 21.0f, 22.0f,
5243
5244 21.0f, 22.0f,
5245 23.0f, 24.0f,
5246 25.0f, 26.0f,
5247 27.0f, 28.0f,
5248 29.0f, 30.0f,
5249 31.0f, 32.0f
5250 }));
5251
5252 return result;
5253}
5254
5255LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5256 armnn::IWorkloadFactory& workloadFactory,
5257 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5258{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005259 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5260 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005261}
5262
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005263template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005264LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
5265 armnn::IWorkloadFactory& workloadFactory,
5266 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5267 float qScale,
5268 int32_t qOffset)
5269{
5270 unsigned int dimension = 1;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005271 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005272
5273 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5274 1.0f, 2.0f,
5275 3.0f, 4.0f,
5276 5.0f, 6.0f,
5277 7.0f, 8.0f,
5278 9.0f, 10.0f,
5279 11.0f, 12.0f
5280 }));
5281
Jim Flynncbb66aa2019-05-15 13:03:54 +01005282 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005283
5284 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5285 11.0f, 12.0f,
5286 13.0f, 14.0f,
5287 15.0f, 16.0f,
5288 17.0f, 18.0f,
5289
5290 }));
5291
Jim Flynncbb66aa2019-05-15 13:03:54 +01005292 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005293
5294 LayerTestResult<T, 4> result(outputTensorInfo);
5295
5296 std::vector<T> output;
5297 output.resize(outputTensorInfo.GetNumElements());
5298 Concatenate<T>(workloadFactory,
5299 memoryManager,
5300 {inputTensorInfo0, inputTensorInfo1},
5301 {input0.data(), input1.data()},
5302 outputTensorInfo,
5303 output.data(),
5304 dimension,
5305 true);
5306
5307 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5308 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5309 1.0f, 2.0f,
5310 3.0f, 4.0f,
5311 5.0f, 6.0f,
5312 7.0f, 8.0f,
5313 9.0f, 10.0f,
5314 11.0f, 12.0f,
5315 11.0f, 12.0f,
5316 13.0f, 14.0f,
5317 15.0f, 16.0f,
5318 17.0f, 18.0f
5319 }));
5320
5321 return result;
5322}
5323
5324LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5325 armnn::IWorkloadFactory& workloadFactory,
5326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5327{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005328 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5329 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005330}
5331
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005332template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005333LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
5334 armnn::IWorkloadFactory& workloadFactory,
5335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5336 float qScale,
5337 int32_t qOffset)
5338{
5339 unsigned int dimension = 2;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005340 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005341
5342 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5343 1.0f, 2.0f,
5344 3.0f, 4.0f,
5345 5.0f, 6.0f,
5346 7.0f, 8.0f,
5347 9.0f, 10.0f,
5348 11.0f, 12.0f
5349 }));
5350
Jim Flynncbb66aa2019-05-15 13:03:54 +01005351 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005352
5353 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5354 11.0f, 12.0f,
5355 13.0f, 14.0f,
5356 15.0f, 16.0f,
5357 17.0f, 18.0f,
5358 19.0f, 20.0f,
5359 21.0f, 22.0f,
5360 23.0f, 24.0f,
5361 25.0f, 26.0f,
5362 27.0f, 28.0f
5363 }));
5364
Jim Flynncbb66aa2019-05-15 13:03:54 +01005365 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005366
5367 LayerTestResult<T, 4> result(outputTensorInfo);
5368
5369 std::vector<T> output;
5370 output.resize(outputTensorInfo.GetNumElements());
5371 Concatenate<T>(workloadFactory,
5372 memoryManager,
5373 {inputTensorInfo0, inputTensorInfo1},
5374 {input0.data(), input1.data()},
5375 outputTensorInfo,
5376 output.data(),
5377 dimension,
5378 true);
5379
5380 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5381 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5382 1.0f, 2.0f,
5383 3.0f, 4.0f,
5384 11.0f, 12.0f,
5385 13.0f, 14.0f,
5386 15.0f, 16.0f,
5387
5388 5.0f, 6.0f,
5389 7.0f, 8.0f,
5390 17.0f, 18.0f,
5391 19.0f, 20.0f,
5392 21.0f, 22.0f,
5393
5394 9.0f, 10.0f,
5395 11.0f, 12.0f,
5396 23.0f, 24.0f,
5397 25.0f, 26.0f,
5398 27.0f, 28.0f
5399 }));
5400
5401 return result;
5402}
5403
5404LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5405 armnn::IWorkloadFactory& workloadFactory,
5406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005408 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5409 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005410}
5411
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005412template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005413LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
5414 armnn::IWorkloadFactory& workloadFactory,
5415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5416 float qScale,
5417 int32_t qOffset,
5418 bool useSubtensor)
5419{
5420 unsigned int dimension = 3;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005421 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005422
5423 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5424 1.0f, 2.0f,
5425 3.0f, 4.0f,
5426 5.0f, 6.0f,
5427 7.0f, 8.0f,
5428 9.0f, 10.0f,
5429 11.0f, 12.0f
5430 }));
5431
Jim Flynncbb66aa2019-05-15 13:03:54 +01005432 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005433
5434 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5435 11.0f, 12.0f, 13.0f,
5436 14.0f, 15.0f, 16.0f,
5437
5438 17.0f, 18.0f, 19.0f,
5439 20.0f, 21.0f, 22.0f,
5440
5441 23.0f, 24.0f, 25.0f,
5442 26.0f, 27.0f, 28.0f
5443 }));
5444
Jim Flynncbb66aa2019-05-15 13:03:54 +01005445 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005446
5447 LayerTestResult<T, 4> result(outputTensorInfo);
5448
5449 std::vector<T> output;
5450 output.resize(outputTensorInfo.GetNumElements());
5451 Concatenate<T>(workloadFactory,
5452 memoryManager,
5453 {inputTensorInfo0, inputTensorInfo1},
5454 {input0.data(), input1.data()},
5455 outputTensorInfo,
5456 output.data(),
5457 dimension,
5458 useSubtensor);
5459
5460 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5461 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5462 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
5463 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
5464 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
5465 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
5466 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
5467 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
5468 }));
5469
5470 return result;
5471}
5472
5473LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5474 armnn::IWorkloadFactory& workloadFactory,
5475 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5476 bool useSubtensor)
5477{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005478 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5479 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005480}
5481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005482LayerTestResult<float, 2> FakeQuantizationTest(
5483 armnn::IWorkloadFactory& workloadFactory,
5484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005485{
5486 constexpr unsigned int width = 2;
5487 constexpr unsigned int height = 3;
5488
5489 const armnn::TensorInfo tensorInfo({height, width },
5490 armnn::DataType::Float32);
5491 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5492 -10.0f, -5.0f,
5493 0.0f, 5.0f,
5494 10.0f, 10.0f
5495 }));
5496
5497 LayerTestResult<float, 2> ret(tensorInfo);
5498
5499 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5500
5501 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5502
5503 armnn::FakeQuantizationQueueDescriptor data;
5504 armnn::WorkloadInfo info;
5505
5506 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5507 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5508 float min = -10.f;
5509 float max = 10.f;
5510
5511 data.m_Parameters.m_Min = min;
5512 data.m_Parameters.m_Max = max;
5513
5514 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5515 armnn::FakeQuantizationQueueDescriptor refData = data;
5516 armnn::WorkloadInfo refInfo = info;
5517 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5518
5519 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5520
5521 inputHandle->Allocate();
5522 outputHandle->Allocate();
5523
5524 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5525
Derek Lambertif30f7d32019-04-09 10:25:02 +01005526 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005527 workload->Execute();
5528
5529 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5530
5531 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5532 0.0f, 63.0f,
5533 128.0f, 191.0f,
5534 255.0f, 255.0f
5535 }));
5536 return ret;
5537}
5538
namespace
{

// Runs an L2Normalization workload over a 4D tensor and compares the result
// against caller-supplied expected values.
//
// Test data is provided in NCHW order; when 'layout' is NHWC both the input
// and the expected output are permuted into NHWC before use. The input and
// output tensors are quantized with (scale, offset) and (outScale, outOffset)
// respectively; 'epsilon' is forwarded to the descriptor's m_Eps.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    // Permutation vector mapping each NCHW source dimension to its NHWC
    // destination: N->0, C->3, H->1, W->2.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) input using the input tensor's own
    // quantization parameters.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // The expected output is permuted with the same vector; input and output
    // shapes are identical here, so inputTensorInfo's shape is valid for both.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / sqrt(sum of squares) of the given elements; used by callers to
// compute expected L2Normalization outputs by hand.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
5622
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005623template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005624LayerTestResult<T, 2> Pad2dTestCommon(
5625 armnn::IWorkloadFactory& workloadFactory,
5626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5627 float qScale,
5628 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005629{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005630 const armnn::TensorShape inputShape{ 3, 3 };
5631 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005632
Derek Lambertif30f7d32019-04-09 10:25:02 +01005633 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5634 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005635
Derek Lambertif30f7d32019-04-09 10:25:02 +01005636 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005637 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005638 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005639 // Height (3) x Width (3)
5640 4, 8, 6,
5641 7, 4, 4,
5642 3, 2, 4
5643 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005644
Derek Lambertif30f7d32019-04-09 10:25:02 +01005645 std::vector<T> expectedOutputValues(
5646 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005647 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005648 0, 0, 0, 0, 0, 0, 0,
5649 0, 0, 0, 0, 0, 0, 0,
5650 0, 0, 4, 8, 6, 0, 0,
5651 0, 0, 7, 4, 4, 0, 0,
5652 0, 0, 3, 2, 4, 0, 0,
5653 0, 0, 0, 0, 0, 0, 0,
5654 0, 0, 0, 0, 0, 0, 0
5655 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005656
Derek Lambertif30f7d32019-04-09 10:25:02 +01005657 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005658
Derek Lambertif30f7d32019-04-09 10:25:02 +01005659 LayerTestResult<T, 2> result(outputTensorInfo);
5660 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005661
Derek Lambertif30f7d32019-04-09 10:25:02 +01005662 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5663 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005664
Derek Lambertif30f7d32019-04-09 10:25:02 +01005665 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005666
Derek Lambertif30f7d32019-04-09 10:25:02 +01005667 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5668 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5669 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005670
Derek Lambertif30f7d32019-04-09 10:25:02 +01005671 descriptor.m_Parameters.m_PadList = PadList;
5672 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005673
Derek Lambertif30f7d32019-04-09 10:25:02 +01005674 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5675 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005676
Derek Lambertif30f7d32019-04-09 10:25:02 +01005677 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005678
Derek Lambertif30f7d32019-04-09 10:25:02 +01005679 inputHandle->Allocate();
5680 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005681
Derek Lambertif30f7d32019-04-09 10:25:02 +01005682 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005683
Derek Lambertif30f7d32019-04-09 10:25:02 +01005684 workload->PostAllocationConfigure();
5685 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005686
Derek Lambertif30f7d32019-04-09 10:25:02 +01005687 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005688
Derek Lambertif30f7d32019-04-09 10:25:02 +01005689 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005690}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005691
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005692template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005693LayerTestResult<T, 3> Pad3dTestCommon(
5694 armnn::IWorkloadFactory& workloadFactory,
5695 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5696 float qScale,
5697 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005698{
5699 const armnn::TensorShape inputShape{ 2, 2, 2 };
5700 const armnn::TensorShape outputShape{ 3, 5, 6 };
5701
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005702 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5703 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005704
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005705 std::vector<T> inputValues(
5706 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005707 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005708 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005709 0, 4,
5710 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005711
5712 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005713 6, 1,
5714 5, 2
5715 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005716
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005717 std::vector<T> expectedOutputValues(
5718 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005719 {
5720
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005721 0, 0, 0, 0, 0, 0,
5722 0, 0, 0, 0, 0, 0,
5723 0, 0, 0, 4, 0, 0,
5724 0, 0, 2, 5, 0, 0,
5725 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005726
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005727 0, 0, 0, 0, 0, 0,
5728 0, 0, 0, 0, 0, 0,
5729 0, 0, 6, 1, 0, 0,
5730 0, 0, 5, 2, 0, 0,
5731 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005732
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005733 0, 0, 0, 0, 0, 0,
5734 0, 0, 0, 0, 0, 0,
5735 0, 0, 0, 0, 0, 0,
5736 0, 0, 0, 0, 0, 0,
5737 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005738
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005739 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005740
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005741 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005742
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005743 LayerTestResult<T, 3> result(outputTensorInfo);
5744 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005745
5746 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5747 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5748
5749 armnn::PadQueueDescriptor descriptor;
5750
5751 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5752 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5753 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5754 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5755
5756 descriptor.m_Parameters.m_PadList = PadList;
5757 armnn::WorkloadInfo info;
5758
5759 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5760 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5761
5762 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5763
5764 inputHandle->Allocate();
5765 outputHandle->Allocate();
5766
5767 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5768
Derek Lambertif30f7d32019-04-09 10:25:02 +01005769 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005770 workload->Execute();
5771
5772 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5773
5774 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005775}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005776
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005777template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005778LayerTestResult<T, 4> Pad4dTestCommon(
5779 armnn::IWorkloadFactory& workloadFactory,
5780 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5781 float qScale,
5782 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005783{
5784 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5785 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5786
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005787 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5788 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005789
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005790 std::vector<T> inputValues(
5791 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005792 {
5793 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005794 0, 1,
5795 2, 3,
5796 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005797
5798 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005799 6, 7,
5800 8, 9,
5801 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005802
5803 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005804 12, 13,
5805 14, 15,
5806 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005807
5808 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005809 18, 19,
5810 20, 21,
5811 22, 23
5812 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005813
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005814 std::vector<T> expectedOutputValues(
5815 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005816 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005817 0, 0, 0, 0,
5818 0, 0, 0, 0,
5819 0, 0, 0, 0,
5820 0, 0, 0, 0,
5821 0, 0, 0, 0,
5822 0, 0, 0, 0,
5823 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005824
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005825 0, 0, 0, 0,
5826 0, 0, 0, 0,
5827 0, 0, 0, 0,
5828 0, 0, 0, 0,
5829 0, 0, 0, 0,
5830 0, 0, 0, 0,
5831 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005832
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005833 0, 0, 0, 0,
5834 0, 0, 0, 0,
5835 0, 0, 0, 0,
5836 0, 0, 0, 0,
5837 0, 0, 0, 0,
5838 0, 0, 0, 0,
5839 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005840
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005841 0, 0, 0, 0,
5842 0, 0, 0, 0,
5843 0, 0, 0, 0,
5844 0, 0, 0, 0,
5845 0, 0, 0, 0,
5846 0, 0, 0, 0,
5847 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005848
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005849 0, 0, 0, 0,
5850 0, 0, 0, 0,
5851 0, 0, 0, 0,
5852 0, 0, 0, 0,
5853 0, 0, 0, 0,
5854 0, 0, 0, 0,
5855 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005856
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005857 0, 0, 0, 0,
5858 0, 0, 0, 0,
5859 0, 0, 0, 0,
5860 0, 0, 0, 0,
5861 0, 0, 0, 0,
5862 0, 0, 0, 0,
5863 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005864
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005865 0, 0, 0, 0,
5866 0, 0, 0, 0,
5867 0, 0, 0, 0,
5868 0, 0, 0, 0,
5869 0, 0, 0, 0,
5870 0, 0, 0, 0,
5871 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005872
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005873 0, 0, 0, 0,
5874 0, 0, 0, 0,
5875 0, 0, 0, 0,
5876 0, 0, 1, 0,
5877 0, 2, 3, 0,
5878 0, 4, 5, 0,
5879 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005880
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005881 0, 0, 0, 0,
5882 0, 0, 0, 0,
5883 0, 0, 0, 0,
5884 0, 6, 7, 0,
5885 0, 8, 9, 0,
5886 0, 10, 11, 0,
5887 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005888
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005889 0, 0, 0, 0,
5890 0, 0, 0, 0,
5891 0, 0, 0, 0,
5892 0, 0, 0, 0,
5893 0, 0, 0, 0,
5894 0, 0, 0, 0,
5895 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005896
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005897 0, 0, 0, 0,
5898 0, 0, 0, 0,
5899 0, 0, 0, 0,
5900 0, 0, 0, 0,
5901 0, 0, 0, 0,
5902 0, 0, 0, 0,
5903 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005904
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005905 0, 0, 0, 0,
5906 0, 0, 0, 0,
5907 0, 0, 0, 0,
5908 0, 0, 0, 0,
5909 0, 0, 0, 0,
5910 0, 0, 0, 0,
5911 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005912
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005913 0, 0, 0, 0,
5914 0, 0, 0, 0,
5915 0, 0, 0, 0,
5916 0, 12, 13, 0,
5917 0, 14, 15, 0,
5918 0, 16, 17, 0,
5919 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005920
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005921 0, 0, 0, 0,
5922 0, 0, 0, 0,
5923 0, 0, 0, 0,
5924 0, 18, 19, 0,
5925 0, 20, 21, 0,
5926 0, 22, 23, 0,
5927 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005928
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005929 0, 0, 0, 0,
5930 0, 0, 0, 0,
5931 0, 0, 0, 0,
5932 0, 0, 0, 0,
5933 0, 0, 0, 0,
5934 0, 0, 0, 0,
5935 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005936
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005937 0, 0, 0, 0,
5938 0, 0, 0, 0,
5939 0, 0, 0, 0,
5940 0, 0, 0, 0,
5941 0, 0, 0, 0,
5942 0, 0, 0, 0,
5943 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005944
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005945 0, 0, 0, 0,
5946 0, 0, 0, 0,
5947 0, 0, 0, 0,
5948 0, 0, 0, 0,
5949 0, 0, 0, 0,
5950 0, 0, 0, 0,
5951 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005952
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005953 0, 0, 0, 0,
5954 0, 0, 0, 0,
5955 0, 0, 0, 0,
5956 0, 0, 0, 0,
5957 0, 0, 0, 0,
5958 0, 0, 0, 0,
5959 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005960
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005961 0, 0, 0, 0,
5962 0, 0, 0, 0,
5963 0, 0, 0, 0,
5964 0, 0, 0, 0,
5965 0, 0, 0, 0,
5966 0, 0, 0, 0,
5967 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005968
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005969 0, 0, 0, 0,
5970 0, 0, 0, 0,
5971 0, 0, 0, 0,
5972 0, 0, 0, 0,
5973 0, 0, 0, 0,
5974 0, 0, 0, 0,
5975 0, 0, 0, 0
5976 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005977
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005978 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005979
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005980 LayerTestResult<T, 4> result(outputTensorInfo);
5981 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005982
5983 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5984 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5985
5986 armnn::PadQueueDescriptor descriptor;
5987
5988 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5989 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5990 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5991 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5992 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5993
5994 descriptor.m_Parameters.m_PadList = PadList;
5995 armnn::WorkloadInfo info;
5996
5997 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5998 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5999
6000 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6001
6002 inputHandle->Allocate();
6003 outputHandle->Allocate();
6004
6005 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6006
Derek Lambertif30f7d32019-04-09 10:25:02 +01006007 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006008 workload->Execute();
6009
6010 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6011
6012 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006013}
6014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006015LayerTestResult<uint8_t, 2> PadUint82dTest(
6016 armnn::IWorkloadFactory& workloadFactory,
6017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006018{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006019 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006020}
6021
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006022LayerTestResult<uint8_t, 3> PadUint83dTest(
6023 armnn::IWorkloadFactory& workloadFactory,
6024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006025{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006026 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006027}
6028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006029LayerTestResult<uint8_t, 4> PadUint84dTest(
6030 armnn::IWorkloadFactory& workloadFactory,
6031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006032{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006033 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006034}
6035
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006036LayerTestResult<float, 2> PadFloat322dTest(
6037 armnn::IWorkloadFactory& workloadFactory,
6038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006039{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006040 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006041}
6042
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006043LayerTestResult<float, 3> PadFloat323dTest(
6044 armnn::IWorkloadFactory& workloadFactory,
6045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006046{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006047 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006048}
6049
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006050LayerTestResult<float, 4> PadFloat324dTest(
6051 armnn::IWorkloadFactory& workloadFactory,
6052 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006053{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006054 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006055}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006056
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006057template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006058LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6059 armnn::IWorkloadFactory& workloadFactory,
6060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6061 float scale,
6062 int32_t offset,
6063 float outScale,
6064 int32_t outOffset,
6065 const armnn::DataLayout layout,
6066 float epsilon)
6067{
6068 // Width: 1
6069 // Height: 1
6070 // Channels: 3
6071 // BatchSize: 1
6072 unsigned int numberOfBatches = 1;
6073 unsigned int numberOfChannels = 3;
6074 unsigned int height = 1;
6075 unsigned int width = 1;
6076
6077 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6078 numberOfBatches, numberOfChannels, height, width, layout);
6079
6080 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6081 std::vector<float> inputValues
6082 {
6083 // Batch 0, Channel 0, Height (1) x Width (1)
6084 0.00000001f,
6085
6086 // Batch 0, Channel 1, Height (1) x Width (1)
6087 0.00000002f,
6088
6089 // Batch 0, Channel 2, Height (1) x Width (1)
6090 0.00000003f,
6091 };
6092
6093 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6094 std::vector<float> expectedOutputValues
6095 {
6096 // Batch 0, Channel 0, Height (1) x Width (1)
6097 0.00000001f * approxInvL2Norm,
6098 0.00000002f * approxInvL2Norm,
6099 0.00000003f * approxInvL2Norm,
6100 };
6101
6102 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6103 inputValues, outScale, outOffset, expectedOutputValues, layout,
6104 epsilon);
6105}
6106
6107
6108template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006109LayerTestResult<T, 4> L2Normalization1dTestCommon(
6110 armnn::IWorkloadFactory& workloadFactory,
6111 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006112 float scale,
6113 int32_t offset,
6114 float outScale,
6115 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006116 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006117{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006118 // Width: 1
6119 // Height: 1
6120 // Channels: 10
6121 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006122 unsigned int numberOfBatches = 1;
6123 unsigned int numberOfChannels = 10;
6124 unsigned int height = 1;
6125 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006126
jimfly013aab7c32018-11-12 13:32:08 +00006127
Nina Drozdd41b2592018-11-19 13:03:36 +00006128 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006129 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006130 std::vector<float> inputValues
6131 {
6132 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006133 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006134
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006135 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006136 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006137
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006138 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006139 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006140
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006141 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006142 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006143
6144 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006145 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006146
6147 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006148 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006149
6150 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006151 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006152
6153 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006154 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006155
6156 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006157 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006158
6159 // Batch 0, Channel 9, Height (1) x Width (1)
6160 10.0f
6161 };
telsoa014fcda012018-03-09 14:13:49 +00006162 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006163 std::vector<float> expectedOutputValues
6164 {
6165 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006166 1.0f * approxInvL2Norm,
6167 2.0f * approxInvL2Norm,
6168 3.0f * approxInvL2Norm,
6169 4.0f * approxInvL2Norm,
6170 5.0f * approxInvL2Norm,
6171 6.0f * approxInvL2Norm,
6172 7.0f * approxInvL2Norm,
6173 8.0f * approxInvL2Norm,
6174 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006175 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006176 };
telsoa014fcda012018-03-09 14:13:49 +00006177
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006178
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006179 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6180 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006181}
6182
Ferran Balaguere52211e2019-06-17 12:23:52 +01006183LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6184 armnn::IWorkloadFactory& workloadFactory,
6185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6186 const armnn::DataLayout layout)
6187{
6188 // Dummy descriptor to get the default value of epsilon.
6189 armnn::L2NormalizationDescriptor descriptor;
6190
6191 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6192 layout, descriptor.m_Eps);
6193}
6194
6195LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6196 armnn::IWorkloadFactory& workloadFactory,
6197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6198 const armnn::DataLayout layout)
6199{
6200 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6201 layout, 1e-9f);
6202}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006203
6204LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006205 armnn::IWorkloadFactory& workloadFactory,
6206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006207 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006208{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006209 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006210}
6211
6212LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6213 armnn::IWorkloadFactory& workloadFactory,
6214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6215 const armnn::DataLayout layout)
6216{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006217 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006218 layout);
6219}
6220
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006221LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6222 armnn::IWorkloadFactory& workloadFactory,
6223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6224 const armnn::DataLayout layout)
6225{
6226 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6227 1.f/128, 128, layout);
6228}
6229
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006230template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6231LayerTestResult<T, 4> L2Normalization2dTestCommon(
6232 armnn::IWorkloadFactory& workloadFactory,
6233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006234 float scale,
6235 int32_t offset,
6236 float outScale,
6237 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006238 const armnn::DataLayout layout)
6239{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006240 // Width: 5
6241 // Height: 1
6242 // Channels: 2
6243 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006244 unsigned int numberOfBatches = 1;
6245 unsigned int numberOfChannels = 2;
6246 unsigned int height = 1;
6247 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006248
Nina Drozdd41b2592018-11-19 13:03:36 +00006249 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006250 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006251 std::vector<float> inputValues
6252 {
6253 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006254 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006255
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006256 // Batch 0, Channel 1, Height (1) x Width (5)
6257 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6258 };
6259 std::vector<float> expectedOutputValues
6260 {
6261 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006262 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6263 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6264 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6265 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6266 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006267
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006268 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006269 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6270 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6271 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6272 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006273 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006274 };
telsoa014fcda012018-03-09 14:13:49 +00006275
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006276 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6277 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006278}
telsoa014fcda012018-03-09 14:13:49 +00006279
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006280LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006281 armnn::IWorkloadFactory& workloadFactory,
6282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006283 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006284{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006285 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6286 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006287}
6288
6289LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6290 armnn::IWorkloadFactory& workloadFactory,
6291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6292 const armnn::DataLayout layout)
6293{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006294 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006295 layout);
6296}
6297
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006298LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6299 armnn::IWorkloadFactory& workloadFactory,
6300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6301 const armnn::DataLayout layout)
6302{
6303 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6304 1.f/128, 128, layout);
6305}
6306
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006307template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6308LayerTestResult<T, 4> L2Normalization3dTestCommon(
6309 armnn::IWorkloadFactory& workloadFactory,
6310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006311 float scale,
6312 int32_t offset,
6313 float outScale,
6314 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006315 const armnn::DataLayout layout)
6316{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006317 // Width: 3
6318 // Height: 4
6319 // Channels: 2
6320 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006321 unsigned int numberOfBatches = 1;
6322 unsigned int numberOfChannels = 2;
6323 unsigned int height = 4;
6324 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006325
Nina Drozdd41b2592018-11-19 13:03:36 +00006326 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006327 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006328 std::vector<float> inputValues
6329 {
6330 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006331 119.0f, 21.0f, 150.0f,
6332 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006333 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006334 147.0f, 199.0f, 220.0f,
6335
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006336 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006337 110.0f, 140.0f, 73.0f,
6338 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006339 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006340 162.0f, 12.0f, 161.0f
6341 };
6342 std::vector<float> expectedOutputValues
6343 {
6344 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006345 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006346 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006347 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6348 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006349 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006350 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006351 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006352 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6353 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6354 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6355 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6356 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6357
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006358 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006359 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6360 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006361 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006362 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6363 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006364 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6365 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006366 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6367 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6368 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006369 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006370 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6371 };
telsoa014fcda012018-03-09 14:13:49 +00006372
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006373 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6374 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006375}
telsoa014fcda012018-03-09 14:13:49 +00006376
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006377LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006378 armnn::IWorkloadFactory& workloadFactory,
6379 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006380 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006381{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006382 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6383 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006384}
6385
6386LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6387 armnn::IWorkloadFactory& workloadFactory,
6388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6389 const armnn::DataLayout layout)
6390{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006391 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006392 layout);
6393}
6394
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006395LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6396 armnn::IWorkloadFactory& workloadFactory,
6397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6398 const armnn::DataLayout layout)
6399{
6400 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6401 1.f/128, 128, layout);
6402}
6403
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006404template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6405LayerTestResult<T, 4> L2Normalization4dTestCommon(
6406 armnn::IWorkloadFactory& workloadFactory,
6407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006408 float scale,
6409 int32_t offset,
6410 float outScale,
6411 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006412 const armnn::DataLayout layout)
6413{
6414 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006415 // Height: 4
6416 // Channels: 3
6417 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006418 unsigned int numberOfBatches = 2;
6419 unsigned int numberOfChannels = 3;
6420 unsigned int height = 4;
6421 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006422
Nina Drozdd41b2592018-11-19 13:03:36 +00006423 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006424 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006425 std::vector<float> inputValues
6426 {
6427 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006428 235.0f, 46.0f, 178.0f,
6429 100.0f, 123.0f, 19.0f,
6430 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006431 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00006432
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006433 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006434 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006435 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00006436 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006437 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00006438
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006439 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006440 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00006441 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006442 12.0f, 209.0f, 200.0f,
6443 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00006444
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006445 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006446 67.0f, 90.0f, 49.0f,
6447 7.0f, 163.0f, 18.0f,
6448 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00006449 247.0f, 59.0f, 189.0f,
6450
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006451 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006452 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006453 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00006454 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006455 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00006456
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006457 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006458 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00006459 115.0f, 116.0f, 238.0f,
6460 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006461 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006462 };
6463 std::vector<float> expectedOutputValues
6464 {
6465 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006466 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006467 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006468 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6469 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6470 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006471 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006472 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006473 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006474 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006475 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006476 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006477 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006478
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006479 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006480 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006481 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006482 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006483 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006484 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006485 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006486 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6487 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6488 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006489 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6490 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6491 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006492
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006493 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006494 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006495 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6496 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6497 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006498 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006499 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006500 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006501 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6502 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006503 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6504 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6505 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006506
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006507 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006508 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6509 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6510 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6511 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006512 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006513 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6514 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006515 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6516 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6517 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006518 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006519 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6520
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006521 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006522 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6523 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6524 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006525 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006526 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6527 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6528 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6529 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006530 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6531 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006532 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006533 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006534
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006535 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006536 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006537 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6538 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6539 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6540 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6541 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6542 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006543 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006544 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006545 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006546 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006547 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006548 };
telsoa014fcda012018-03-09 14:13:49 +00006549
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006550 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6551 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006552}
6553
6554LayerTestResult<float, 4> L2Normalization4dTest(
6555 armnn::IWorkloadFactory& workloadFactory,
6556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6557 const armnn::DataLayout layout)
6558{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006559 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6560 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006561}
6562
6563LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
6564 armnn::IWorkloadFactory& workloadFactory,
6565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6566 const armnn::DataLayout layout)
6567{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006568 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006569 layout);
telsoa014fcda012018-03-09 14:13:49 +00006570}
6571
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006572LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
6573 armnn::IWorkloadFactory& workloadFactory,
6574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6575 const armnn::DataLayout layout)
6576{
6577 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6578 1.f/128, 128, layout);
6579}
6580
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006581template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006582LayerTestResult<T, 4> ConstantTestImpl(
6583 armnn::IWorkloadFactory& workloadFactory,
6584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00006585 float qScale,
6586 int32_t qOffset)
6587{
6588 constexpr unsigned int inputWidth = 3;
6589 constexpr unsigned int inputHeight = 4;
6590 constexpr unsigned int inputChannels = 3;
6591 constexpr unsigned int inputBatchSize = 2;
6592
6593 constexpr unsigned int outputWidth = inputWidth;
6594 constexpr unsigned int outputHeight = inputHeight;
6595 constexpr unsigned int outputChannels = inputChannels;
6596 constexpr unsigned int outputBatchSize = inputBatchSize;
6597
Nina Drozd58ef2c62019-05-16 12:09:18 +01006598 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6599 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006600
Nina Drozd58ef2c62019-05-16 12:09:18 +01006601 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6602 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006603
6604 // Set quantization parameters if the requested type is a quantized type.
6605 if(armnn::IsQuantizedType<T>())
6606 {
6607 inputTensorInfo.SetQuantizationScale(qScale);
6608 inputTensorInfo.SetQuantizationOffset(qOffset);
6609 outputTensorInfo.SetQuantizationScale(qScale);
6610 outputTensorInfo.SetQuantizationOffset(qOffset);
6611 }
6612
6613 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
6614 QuantizedVector<T>(qScale, qOffset, {
6615 // Batch 0, Channel 0
6616 235.0f, 46.0f, 178.0f,
6617 100.0f, 123.0f, 19.0f,
6618 172.0f, 74.0f, 250.0f,
6619 6.0f, 195.0f, 80.0f,
6620
6621 // Batch 0, Channel 1
6622 113.0f, 95.0f, 202.0f,
6623 77.0f, 114.0f, 71.0f,
6624 122.0f, 246.0f, 166.0f,
6625 82.0f, 28.0f, 37.0f,
6626
6627 // Batch 0, Channel 2
6628 56.0f, 170.0f, 162.0f,
6629 194.0f, 89.0f, 254.0f,
6630 12.0f, 209.0f, 200.0f,
6631 1.0f, 64.0f, 54.0f,
6632
6633 // Batch 1, Channel 0
6634 67.0f, 90.0f, 49.0f,
6635 7.0f, 163.0f, 18.0f,
6636 25.0f, 117.0f, 103.0f,
6637 247.0f, 59.0f, 189.0f,
6638
6639 // Batch 1, Channel 1
6640 239.0f, 104.0f, 199.0f,
6641 17.0f, 124.0f, 153.0f,
6642 222.0f, 217.0f, 75.0f,
6643 32.0f, 126.0f, 21.0f,
6644
6645 // Batch 1, Channel 2
6646 97.0f, 145.0f, 215.0f,
6647 115.0f, 116.0f, 238.0f,
6648 226.0f, 16.0f, 132.0f,
6649 92.0f, 125.0f, 88.0f,
6650 })));
6651
6652 LayerTestResult<T, 4> result(outputTensorInfo);
6653 result.outputExpected = input;
6654
6655 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6656
6657 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
6658 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
6659
6660 armnn::ConstantQueueDescriptor descriptor;
6661 descriptor.m_LayerOutput = &constantTensor;
6662
6663 armnn::WorkloadInfo info;
6664 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6665
6666 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
6667
6668 outputHandle->Allocate();
6669
Derek Lambertif30f7d32019-04-09 10:25:02 +01006670 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006671 workload->Execute();
6672
6673 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6674 return result;
6675}
6676
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006677LayerTestResult<float, 4> ConstantTest(
6678 armnn::IWorkloadFactory& workloadFactory,
6679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006680{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006681 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006682}
6683
Nina Drozd58ef2c62019-05-16 12:09:18 +01006684LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6685 armnn::IWorkloadFactory& workloadFactory,
6686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6687{
6688 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6689}
6690
6691LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006692 armnn::IWorkloadFactory& workloadFactory,
6693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006694{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006695 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006696}
6697
// Concatenates two QAsymm8 tensors along the channel (outermost) dimension
// where the two inputs carry different quantization parameters. The output
// reuses input1's scale/offset, so the workload must pass input1's data
// through unchanged and requantize input2's data.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output stacks the two inputs along channels: 2 + 1 = 3.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Channels 0-1 are input1 verbatim; channel 2 is input2 requantized from
    // (inputScale2, inputOffset2) into the output's quantization space.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    // NOTE(review): quantization info is attached after the tensors were
    // built above — presumably MakeTensor only consumes the shape; the
    // expected output data is already quantized by hand.
    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If the backend supports sub-tensors, each input handle is a view into
    // the output tensor at its concatenation origin; otherwise a standalone
    // tensor is created for each input.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    // Wire the descriptor: inputs/output first, then the per-input view
    // origins, then create the workload from the completed descriptor.
    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocation must precede the host-to-handle copies and execution.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6840
// Concatenates two QAsymm8 tensors along the channel (outermost) dimension.
// All tensors share identical quantization parameters, so the element data is
// copied through untouched.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output stacks the two inputs along channels: 2 + 1 = 3.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If the backend supports sub-tensors, each input handle is a view into
    // the output tensor at its concatenation origin; otherwise a standalone
    // tensor is created for each input.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    // Wire the descriptor: inputs/output first, then the per-input view
    // origins, then create the workload from the completed descriptor.
    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocation must precede the host-to-handle copies and execution.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6976
// Concatenates two QSymm16 tensors along the channel (outermost) dimension.
// All tensors share identical quantization parameters, so the element data is
// copied through untouched. Mirrors ConcatUint8Test for the 16-bit data type.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output stacks the two inputs along channels: 2 + 1 = 3.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If the backend supports sub-tensors, each input handle is a view into
    // the output tensor at its concatenation origin; otherwise a standalone
    // tensor is created for each input.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    // Wire the descriptor: inputs/output first, then the per-input view
    // origins, then create the workload from the completed descriptor.
    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocation must precede the host-to-handle copies and execution.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007109
surmeh01bceff2f2018-03-29 16:29:27 +01007110namespace
telsoa014fcda012018-03-09 14:13:49 +00007111{
Sadik Armagan2999a022019-04-09 14:20:12 +01007112template <typename T>
7113LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007114 armnn::IWorkloadFactory& workloadFactory,
7115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7116 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007117 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007118 float scale0,
7119 int32_t offset0,
7120 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007121 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007122 float scale1,
7123 int32_t offset1,
7124 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007125 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007126 float outScale,
7127 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01007128{
Sadik Armagan2999a022019-04-09 14:20:12 +01007129 auto dataType = (std::is_same<T, uint8_t>::value ?
7130 armnn::DataType::QuantisedAsymm8 :
7131 armnn::DataType::QuantisedSymm16);
7132
7133 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
7134 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
7135 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00007136
surmeh01bceff2f2018-03-29 16:29:27 +01007137 inputTensorInfo0.SetQuantizationScale(scale0);
7138 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00007139
surmeh01bceff2f2018-03-29 16:29:27 +01007140 inputTensorInfo1.SetQuantizationScale(scale1);
7141 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00007142
surmeh01bceff2f2018-03-29 16:29:27 +01007143 outputTensorInfo.SetQuantizationScale(outScale);
7144 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00007145
Sadik Armagan2999a022019-04-09 14:20:12 +01007146 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7147 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00007148
Sadik Armagan2999a022019-04-09 14:20:12 +01007149 LayerTestResult<T, 4> result(outputTensorInfo);
7150 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7151
7152 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7153 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7154 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7155
7156 armnn::AdditionQueueDescriptor data;
7157 armnn::WorkloadInfo info;
7158 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7159 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7160 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7161
7162 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
7163
7164 inputHandle0->Allocate();
7165 inputHandle1->Allocate();
7166 outputHandle->Allocate();
7167
7168 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7169 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7170
Derek Lambertif30f7d32019-04-09 10:25:02 +01007171 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01007172 workload->Execute();
7173
7174 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7175
7176 return result;
7177}
7178} // anonymous namespace
7179
7180LayerTestResult<uint8_t, 4> AdditionUint8Test(
7181 armnn::IWorkloadFactory& workloadFactory,
7182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7183{
7184 const unsigned int shape0[] = { 1, 2, 2, 3 };
7185 const unsigned int shape1[] = { 1, 2, 2, 3 };
7186
7187 std::vector<uint8_t> input0(
7188 {
7189 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7190 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7191 });
7192
7193 std::vector<uint8_t> input1(
7194 {
7195 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7196 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7197 });
7198
7199 std::vector<uint8_t> output(
7200 {
7201 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7202 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7203 });
7204
7205 return AdditionQuantizeTestHelper(workloadFactory,
7206 memoryManager,
7207 shape0, input0, 7.0f, 3,
7208 shape1, input1, 7.0f, 3,
7209 shape0, output, 7.0f, 3);
7210}
7211
// Quantized addition over QSymm16 tensors; inputs and output all use
// scale 7 with zero offset.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    // Dequantized values (q * 7) shown to the right.
    std::vector<int16_t> input0(
        {
            63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
            203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
        });

    // Dequantized values (q * 7) shown to the right.
    std::vector<int16_t> input1(
        {
            21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
            126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
        });

    // Element-wise sums; no clamping occurs since every result fits in int16.
    std::vector<int16_t> output(
        {
            84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7243
7244namespace
7245{
7246template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7247LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
7248 armnn::IWorkloadFactory& workloadFactory,
7249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7250 const unsigned int shape0[4],
7251 const std::vector<T> & values0,
7252 float scale0,
7253 int32_t offset0,
7254 const unsigned int shape1[4],
7255 const std::vector<T> & values1,
7256 float scale1,
7257 int32_t offset1,
7258 const unsigned int outShape[4],
7259 const std::vector<T> & outValues,
7260 float outScale,
7261 int32_t outOffset)
7262{
7263 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7264 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7265 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
7266
7267 inputTensorInfo0.SetQuantizationScale(scale0);
7268 inputTensorInfo0.SetQuantizationOffset(offset0);
7269
7270 inputTensorInfo1.SetQuantizationScale(scale1);
7271 inputTensorInfo1.SetQuantizationOffset(offset1);
7272
7273 outputTensorInfo.SetQuantizationScale(outScale);
7274 outputTensorInfo.SetQuantizationOffset(outOffset);
7275
7276 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7277 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7278
7279 LayerTestResult<T, 4> result(outputTensorInfo);
7280 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00007281
surmeh01bceff2f2018-03-29 16:29:27 +01007282 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00007283 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00007284 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7285
7286 armnn::MultiplicationQueueDescriptor data;
7287 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01007288 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7289 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00007290 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7291
7292 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
7293
surmeh01bceff2f2018-03-29 16:29:27 +01007294 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007295 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007296 outputHandle->Allocate();
7297
surmeh01bceff2f2018-03-29 16:29:27 +01007298 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007299 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007300
Derek Lambertif30f7d32019-04-09 10:25:02 +01007301 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007302 workload->Execute();
7303
7304 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7305
7306 return result;
7307}
surmeh01bceff2f2018-03-29 16:29:27 +01007308} // anonymous namespace
7309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007310LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7311 armnn::IWorkloadFactory& workloadFactory,
7312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007313{
7314 unsigned int batchSize = 1;
7315 unsigned int channels = 2;
7316 unsigned int height = 2;
7317 unsigned int width = 3;
7318 const unsigned int shape[] = { batchSize, channels, height, width };
7319
telsoa01c577f2c2018-08-31 09:22:23 +01007320 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007321 std::vector<uint8_t> input0({
7322 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7323 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7324 });
7325
telsoa01c577f2c2018-08-31 09:22:23 +01007326 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007327 std::vector<uint8_t> input1({
7328 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7329 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7330 });
7331
telsoa01c577f2c2018-08-31 09:22:23 +01007332 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007333 std::vector<uint8_t> output(
7334 {
7335 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7336 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7337 });
7338
Sadik Armagan2999a022019-04-09 14:20:12 +01007339 // Scale/offset chosen to have output values out of range.
7340 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7341 memoryManager,
7342 shape,
7343 input0,
7344 4.0f,
7345 1,
7346 shape,
7347 input1,
7348 3.0f,
7349 -2,
7350 shape,
7351 output,
7352 1366.255f,
7353 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007354}
7355
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007356LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7357 armnn::IWorkloadFactory& workloadFactory,
7358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007359{
7360 const unsigned int shape0[] = { 1, 2, 2, 3 };
7361 const unsigned int shape1[] = { 1, 1, 1, 1 };
7362
7363 std::vector<uint8_t> input0({
7364 1, 2, 3, 4, 5, 6,
7365 7, 8, 9, 10, 11, 12
7366 });
7367
7368 std::vector<uint8_t> input1({2});
7369
7370 std::vector<uint8_t> output({
7371 2, 4, 6, 8, 10, 12,
7372 14, 16, 18, 20, 22, 24
7373 });
7374
Sadik Armagan2999a022019-04-09 14:20:12 +01007375 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7376 memoryManager,
7377 shape0,
7378 input0,
7379 1.0f,
7380 0,
7381 shape1,
7382 input1,
7383 1.0f,
7384 0,
7385 shape0,
7386 output,
7387 1.0f,
7388 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007389}
7390
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007391LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7392 armnn::IWorkloadFactory& workloadFactory,
7393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007394{
7395 const unsigned int shape0[] = { 1, 2, 2, 3 };
7396 const unsigned int shape1[] = { 1, 1, 1, 3 };
7397
7398 std::vector<uint8_t> input0({
7399 1, 2, 3, 4, 5, 6,
7400 7, 8, 9, 10, 11, 12
7401 });
7402
7403 std::vector<uint8_t> input1({1, 2, 3});
7404
7405 std::vector<uint8_t> output({
7406 1, 4, 9, 4, 10, 18,
7407 7, 16, 27, 10, 22, 36
7408 });
7409
Sadik Armagan2999a022019-04-09 14:20:12 +01007410 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7411 memoryManager,
7412 shape0,
7413 input0,
7414 1.0f,
7415 0,
7416 shape1,
7417 input1,
7418 1.0f,
7419 0,
7420 shape0,
7421 output,
7422 1.0f,
7423 0);
7424}
7425
7426LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7427 armnn::IWorkloadFactory& workloadFactory,
7428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7429{
7430 const unsigned int shape[] = { 1, 2, 2, 3 };
7431
7432 std::vector<int16_t> input0(
7433 {
7434 6, 7, 8, 9, 10, 11,
7435 12, 13, 14, 15, 16, 17
7436 });
7437
7438 std::vector<int16_t> input1(
7439 {
7440 1, 2, 3, 4, 5, 6,
7441 7, 8, 9, 10, 11, 12
7442 });
7443
7444 std::vector<int16_t> output(
7445 {
7446 6, 14, 24, 36, 50, 66,
7447 84, 104, 126, 150, 176, 204
7448 });
7449
7450 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7451 memoryManager,
7452 shape,
7453 input0,
7454 1.0f,
7455 0,
7456 shape,
7457 input1,
7458 1.0f,
7459 0,
7460 shape,
7461 output,
7462 1.0f,
7463 0);
7464}
7465
7466LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7467 armnn::IWorkloadFactory& workloadFactory,
7468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7469{
7470 const unsigned int shape0[] = { 1, 2, 2, 3 };
7471 const unsigned int shape1[] = { 1, 1, 1, 1 };
7472
7473 std::vector<int16_t> input0(
7474 {
7475 1, 2, 3, 4, 5, 6,
7476 7, 8, 9, 10, 11, 12
7477 });
7478
7479 std::vector<int16_t> input1({2});
7480
7481 std::vector<int16_t> output(
7482 {
7483 2, 4, 6, 8, 10, 12,
7484 14, 16, 18, 20, 22, 24
7485 });
7486
7487 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7488 memoryManager,
7489 shape0,
7490 input0,
7491 1.0f,
7492 0,
7493 shape1,
7494 input1,
7495 1.0f,
7496 0,
7497 shape0,
7498 output,
7499 1.0f,
7500 0);
7501}
7502
7503LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7504 armnn::IWorkloadFactory& workloadFactory,
7505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7506{
7507 const unsigned int shape0[] = { 1, 2, 2, 3 };
7508 const unsigned int shape1[] = { 1, 1, 1, 3 };
7509
7510 std::vector<int16_t> input0(
7511 {
7512 1, 2, 3, 4, 5, 6,
7513 7, 8, 9, 10, 11, 12
7514 });
7515
7516 std::vector<int16_t> input1({1, 2, 3});
7517
7518 std::vector<int16_t> output(
7519 {
7520 1, 4, 9, 4, 10, 18,
7521 7, 16, 27, 10, 22, 36
7522 });
7523
7524 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7525 memoryManager,
7526 shape0,
7527 input0,
7528 1.0f,
7529 0,
7530 shape1,
7531 input1,
7532 1.0f,
7533 0,
7534 shape0,
7535 output,
7536 1.0f,
7537 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007538}
telsoa014fcda012018-03-09 14:13:49 +00007539
David Beckf195f032018-09-06 16:46:34 +01007540namespace
7541{
Sadik Armagan2999a022019-04-09 14:20:12 +01007542template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007543LayerTestResult<T, 4> SubtractionTestHelper(
7544 armnn::IWorkloadFactory& workloadFactory,
7545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7546 const unsigned int shape0[4],
7547 const std::vector<T>& values0,
7548 float scale0,
7549 int32_t offset0,
7550 const unsigned int shape1[4],
7551 const std::vector<T> & values1,
7552 float scale1,
7553 int32_t offset1,
7554 const unsigned int outShape[4],
7555 const std::vector<T> & outValues,
7556 float outScale,
7557 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01007558{
Sadik Armagan2999a022019-04-09 14:20:12 +01007559 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7560 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7561 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01007562
7563 inputTensorInfo0.SetQuantizationScale(scale0);
7564 inputTensorInfo0.SetQuantizationOffset(offset0);
7565
7566 inputTensorInfo1.SetQuantizationScale(scale1);
7567 inputTensorInfo1.SetQuantizationOffset(offset1);
7568
7569 outputTensorInfo.SetQuantizationScale(outScale);
7570 outputTensorInfo.SetQuantizationOffset(outOffset);
7571
7572 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7573 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7574
7575 LayerTestResult<T, 4> result(outputTensorInfo);
7576 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7577
7578 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7579 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7580 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7581
7582 armnn::SubtractionQueueDescriptor data;
7583 armnn::WorkloadInfo info;
7584 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7585 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7586 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7587
7588 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
7589
7590 inputHandle0->Allocate();
7591 inputHandle1->Allocate();
7592 outputHandle->Allocate();
7593
7594 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7595 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7596
Derek Lambertif30f7d32019-04-09 10:25:02 +01007597 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01007598 workload->Execute();
7599
7600 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7601
7602 return result;
7603}
7604} // anonymous namespace
7605
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007606LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7607 armnn::IWorkloadFactory& workloadFactory,
7608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007609{
7610 const unsigned int shape0[] = { 1, 1, 2, 2 };
7611 const unsigned int shape1[] = { 1, 1, 2, 2 };
7612
7613 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7614 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7615 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7616
Sadik Armagan2999a022019-04-09 14:20:12 +01007617 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7618 memoryManager,
7619 shape0, input0, 0.5f, 2,
7620 shape1, input1, 1.0f, 0,
7621 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007622}
7623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007624LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7625 armnn::IWorkloadFactory& workloadFactory,
7626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007627{
7628 const unsigned int shape0[] = { 1, 1, 2, 2 };
7629 const unsigned int shape1[] = { 1, 1, 1, 1 };
7630
7631 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7632 std::vector<uint8_t> input1({ 2 });
7633 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7634
Sadik Armagan2999a022019-04-09 14:20:12 +01007635 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7636 memoryManager,
7637 shape0, input0, 0.5f, 2,
7638 shape1, input1, 1.0f, 0,
7639 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007640}
7641
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007642LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7643 armnn::IWorkloadFactory& workloadFactory,
7644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007645{
7646 const unsigned int shape0[] = { 1, 1, 2, 2 };
7647 const unsigned int shape1[] = { 1, 1, 2, 1 };
7648
7649 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7650 std::vector<uint8_t> input1({ 2, 1 });
7651 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7652
Sadik Armagan2999a022019-04-09 14:20:12 +01007653 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7654 memoryManager,
7655 shape0, input0, 1.0f, 0,
7656 shape1, input1, 1.0f, 0,
7657 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007658}
7659
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007660LayerTestResult<float, 4> SubtractionTest(
7661 armnn::IWorkloadFactory& workloadFactory,
7662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007663{
7664 const unsigned int shape0[] = { 1, 1, 2, 2 };
7665 const unsigned int shape1[] = { 1, 1, 2, 2 };
7666
7667 std::vector<float> input0({ 1, 2, 3, 4 });
7668 std::vector<float> input1({ 1, -1, 0, 2 });
7669 std::vector<float> output({ 0, 3, 3, 2 });
7670
Sadik Armagan2999a022019-04-09 14:20:12 +01007671 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7672 memoryManager,
7673 shape0, input0, 1.0f, 0,
7674 shape1, input1, 1.0f, 0,
7675 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007676}
7677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007678LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7679 armnn::IWorkloadFactory& workloadFactory,
7680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007681{
7682 const unsigned int shape0[] = { 1, 1, 2, 2 };
7683 const unsigned int shape1[] = { 1, 1, 1, 1 };
7684
7685 std::vector<float> input0({ 1, 2, 3, 4 });
7686 std::vector<float> input1({ 10 });
7687 std::vector<float> output({ -9, -8, -7, -6 });
7688
Sadik Armagan2999a022019-04-09 14:20:12 +01007689 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7690 memoryManager,
7691 shape0, input0, 1.0f, 0,
7692 shape1, input1, 1.0f, 0,
7693 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007694}
7695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007696LayerTestResult<float, 4> SubtractionBroadcastTest(
7697 armnn::IWorkloadFactory& workloadFactory,
7698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007699{
7700 const unsigned int shape0[] = { 1, 1, 2, 2 };
7701 const unsigned int shape1[] = { 1, 1, 1, 2 };
7702
7703 std::vector<float> input0({ 1, 2, 3, 4 });
7704 std::vector<float> input1({ 10, -5 });
7705 std::vector<float> output({ -9, 7, -7, 9 });
7706
Sadik Armagan2999a022019-04-09 14:20:12 +01007707 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7708 memoryManager,
7709 shape0, input0, 1.0f, 0,
7710 shape1, input1, 1.0f, 0,
7711 shape0, output, 1.0f, 0);
7712}
7713
7714LayerTestResult<int16_t, 4> SubtractionInt16Test(
7715 armnn::IWorkloadFactory& workloadFactory,
7716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7717{
7718 const unsigned int shape0[] = { 1, 1, 2, 2 };
7719 const unsigned int shape1[] = { 1, 1, 2, 2 };
7720
7721 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7722 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7723 std::vector<int16_t> output({ 3, 3, 5, 5 });
7724
7725 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7726 memoryManager,
7727 shape0, input0, 0.5f, 0,
7728 shape1, input1, 1.0f, 0,
7729 shape0, output, 1.0f, 0);
7730}
7731
7732LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7733 armnn::IWorkloadFactory& workloadFactory,
7734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7735{
7736 const unsigned int shape0[] = { 1, 1, 2, 2 };
7737 const unsigned int shape1[] = { 1, 1, 1, 1 };
7738
7739 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7740 std::vector<int16_t> input1({ 2 });
7741 std::vector<int16_t> output({ 3, 4, 5, 6 });
7742
7743 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7744 memoryManager,
7745 shape0, input0, 0.5f, 0,
7746 shape1, input1, 1.0f, 0,
7747 shape0, output, 1.0f, 0);
7748}
7749
7750LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7751 armnn::IWorkloadFactory& workloadFactory,
7752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7753{
7754 const unsigned int shape0[] = { 1, 1, 2, 2 };
7755 const unsigned int shape1[] = { 1, 1, 2, 1 };
7756
7757 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7758 std::vector<int16_t> input1({ 2, 1 });
7759 std::vector<int16_t> output({ 8, 11, 12, 15 });
7760
7761 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7762 memoryManager,
7763 shape0, input0, 1.0f, 0,
7764 shape1, input1, 1.0f, 0,
7765 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007766}
7767
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007768LayerTestResult<float, 4> BatchNormTest(
7769 armnn::IWorkloadFactory& workloadFactory,
7770 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007771{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007772 // BatchSize: 1
7773 // Channels: 2
7774 // Height: 3
7775 // Width: 2
7776
7777 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7778 std::vector<float> inputValues
7779 {
7780 // Batch 0, Channel 0, Height (3) x Width (2)
7781 1.f, 4.f,
7782 4.f, 2.f,
7783 1.f, 6.f,
7784
7785 // Batch 0, Channel 1, Height (3) x Width (2)
7786 1.f, 1.f,
7787 4.f, 1.f,
7788 -2.f, 4.f
7789 };
7790 std::vector<float> expectedOutputValues
7791 {
7792 // Batch 0, Channel 0, Height (3) x Width (2)
7793 1.f, 4.f,
7794 4.f, 2.f,
7795 1.f, 6.f,
7796
7797 // Batch 0, Channel 1, Height (3) x Width (2)
7798 3.f, 3.f,
7799 4.f, 3.f,
7800 2.f, 4.f
7801 };
7802
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007803 return BatchNormTestImpl<armnn::DataType::Float32>(
7804 workloadFactory, memoryManager,
7805 inputOutputShape, inputValues, expectedOutputValues,
7806 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007807}
7808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007809LayerTestResult<float, 4> BatchNormNhwcTest(
7810 armnn::IWorkloadFactory& workloadFactory,
7811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007812{
7813 // BatchSize: 1
7814 // Height: 3
7815 // Width: 2
7816 // Channels: 2
7817
7818 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7819 std::vector<float> inputValues
7820 {
7821 // Batch 0, Height 0, Width (2) x Channel (2)
7822 1.f, 1.f,
7823 4.f, 1.f,
7824
7825 // Batch 0, Height 1, Width (2) x Channel (2)
7826 4.f, 4.f,
7827 2.f, 1.f,
7828
7829 // Batch 0, Height 2, Width (2) x Channel (2)
7830 1.f, -2.f,
7831 6.f, 4.f
7832 };
7833 std::vector<float> expectedOutputValues
7834 {
7835 // Batch 0, Height 0, Width (2) x Channel (2)
7836 1.f, 3.f,
7837 4.f, 3.f,
7838
7839 // Batch 0, Height 1, Width (2) x Channel (2)
7840 4.f, 4.f,
7841 2.f, 3.f,
7842
7843 // Batch 0, Height 2, Width (2) x Channel (2)
7844 1.f, 2.f,
7845 6.f, 4.f
7846 };
7847
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007848 return BatchNormTestImpl<armnn::DataType::Float32>(
7849 workloadFactory, memoryManager,
7850 inputOutputShape, inputValues, expectedOutputValues,
7851 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00007852}
7853
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007854LayerTestResult<uint8_t, 4> BatchNormUint8Test(
7855 armnn::IWorkloadFactory& workloadFactory,
7856 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007857{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007858 // BatchSize: 1
7859 // Channels: 2
7860 // Height: 3
7861 // Width: 2
7862
7863 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7864 std::vector<float> inputValues
7865 {
7866 // Batch 0, Channel 0, Height (3) x Width (2)
7867 1.f, 4.f,
7868 4.f, 2.f,
7869 1.f, 6.f,
7870
7871 // Batch 0, Channel 1, Height (3) x Width (2)
7872 1.f, 1.f,
7873 4.f, 1.f,
7874 -2.f, 4.f
7875 };
7876 std::vector<float> expectedOutputValues
7877 {
7878 // Batch 0, Channel 0, Height (3) x Width (2)
7879 1.f, 4.f,
7880 4.f, 2.f,
7881 1.f, 6.f,
7882
7883 // Batch 0, Channel 1, Height (3) x Width (2)
7884 3.f, 3.f,
7885 4.f, 3.f,
7886 2.f, 4.f
7887 };
7888
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007889 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
7890 workloadFactory, memoryManager,
7891 inputOutputShape, inputValues, expectedOutputValues,
7892 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007893}
7894
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007895LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
7896 armnn::IWorkloadFactory& workloadFactory,
7897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01007898{
7899 // BatchSize: 1
7900 // Height: 3
7901 // Width: 2
7902 // Channels: 2
7903
7904 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7905 std::vector<float> inputValues
7906 {
7907 // Batch 0, Height 0, Width (2) x Channel (2)
7908 1.f, 1.f,
7909 4.f, 1.f,
7910
7911 // Batch 0, Height 1, Width (2) x Channel (2)
7912 4.f, 4.f,
7913 2.f, 1.f,
7914
7915 // Batch 0, Height 2, Width (2) x Channel (2)
7916 1.f, -2.f,
7917 6.f, 4.f
7918 };
7919 std::vector<float> expectedOutputValues
7920 {
7921 // Batch 0, Height 0, Width (2) x Channel (2)
7922 1.f, 3.f,
7923 4.f, 3.f,
7924
7925 // Batch 0, Height 1, Width (2) x Channel (2)
7926 4.f, 4.f,
7927 2.f, 3.f,
7928
7929 // Batch 0, Height 2, Width (2) x Channel (2)
7930 1.f, 2.f,
7931 6.f, 4.f
7932 };
7933
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007934 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
7935 (workloadFactory, memoryManager,
7936 inputOutputShape, inputValues, expectedOutputValues,
7937 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00007938}
7939
Matteo Martincighf5507132019-06-04 10:59:47 +01007940LayerTestResult<int16_t, 4> BatchNormInt16Test(
7941 armnn::IWorkloadFactory& workloadFactory,
7942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7943{
7944 // BatchSize: 1
7945 // Channels: 2
7946 // Height: 3
7947 // Width: 2
7948
7949 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
7950 std::vector<float> inputValues
7951 {
7952 // Batch 0, Channel 0, Height (3) x Width (2)
7953 1.f, 4.f,
7954 4.f, 2.f,
7955 1.f, 6.f,
7956
7957 // Batch 0, Channel 1, Height (3) x Width (2)
7958 1.f, 1.f,
7959 4.f, 1.f,
7960 -2.f, 4.f
7961 };
7962 std::vector<float> expectedOutputValues
7963 {
7964 // Batch 0, Channel 0, Height (3) x Width (2)
7965 1.f, 4.f,
7966 4.f, 2.f,
7967 1.f, 6.f,
7968
7969 // Batch 0, Channel 1, Height (3) x Width (2)
7970 3.f, 3.f,
7971 4.f, 3.f,
7972 2.f, 4.f
7973 };
7974
7975 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
7976 workloadFactory, memoryManager,
7977 inputOutputShape, inputValues, expectedOutputValues,
7978 1.f/20.f, 50, armnn::DataLayout::NCHW);
7979}
7980
7981LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
7982 armnn::IWorkloadFactory& workloadFactory,
7983 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7984{
7985 // BatchSize: 1
7986 // Height: 3
7987 // Width: 2
7988 // Channels: 2
7989
7990 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
7991 std::vector<float> inputValues
7992 {
7993 // Batch 0, Height 0, Width (2) x Channel (2)
7994 1.f, 1.f,
7995 4.f, 1.f,
7996
7997 // Batch 0, Height 1, Width (2) x Channel (2)
7998 4.f, 4.f,
7999 2.f, 1.f,
8000
8001 // Batch 0, Height 2, Width (2) x Channel (2)
8002 1.f, -2.f,
8003 6.f, 4.f
8004 };
8005 std::vector<float> expectedOutputValues
8006 {
8007 // Batch 0, Height 0, Width (2) x Channel (2)
8008 1.f, 3.f,
8009 4.f, 3.f,
8010
8011 // Batch 0, Height 1, Width (2) x Channel (2)
8012 4.f, 4.f,
8013 2.f, 3.f,
8014
8015 // Batch 0, Height 2, Width (2) x Channel (2)
8016 1.f, 2.f,
8017 6.f, 4.f
8018 };
8019
8020 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8021 (workloadFactory, memoryManager,
8022 inputOutputShape, inputValues, expectedOutputValues,
8023 1.f/20.f, 50, armnn::DataLayout::NHWC);
8024}
8025
Nina Drozd58ef2c62019-05-16 12:09:18 +01008026LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008027 armnn::IWorkloadFactory& workloadFactory,
8028 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008029{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008030 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008031}
8032
Nina Drozd58ef2c62019-05-16 12:09:18 +01008033LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8034 armnn::IWorkloadFactory& workloadFactory,
8035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8036{
8037 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8038}
8039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008040LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8041 armnn::IWorkloadFactory& workloadFactory,
8042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008043{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008044 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008045}
8046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008047LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8048 armnn::IWorkloadFactory& workloadFactory,
8049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008050{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008051 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008052}
8053
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008054LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8055 armnn::IWorkloadFactory& workloadFactory,
8056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008057{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008058 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008059}
8060
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008061LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8062 armnn::IWorkloadFactory& workloadFactory,
8063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008064{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008065 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8066 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008067}
8068
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008069LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8070 armnn::IWorkloadFactory& workloadFactory,
8071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008072{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008073 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8074 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008075}
8076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008077LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8078 armnn::IWorkloadFactory& workloadFactory,
8079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008081 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008082}
8083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008084LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8085 armnn::IWorkloadFactory& workloadFactory,
8086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008087{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008088 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008089}
8090
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008091LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8092 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8094 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008095{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008096 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8097 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008098}
8099
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008100LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8101 armnn::IWorkloadFactory& workloadFactory,
8102 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008103{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008104 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008105}
8106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008107LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8108 armnn::IWorkloadFactory& workloadFactory,
8109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008110{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008111 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8112 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008113}
8114
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008115LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8116 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8118 bool useSubtensor)
8119{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008120 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8121 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008122}
8123
8124LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8125 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008127{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008128 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008129}
8130
8131LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8132 armnn::IWorkloadFactory& workloadFactory,
8133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8134{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008135 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008136}
8137
8138LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8139 armnn::IWorkloadFactory& workloadFactory,
8140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8141{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008142 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008143}
8144
8145LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8146 armnn::IWorkloadFactory& workloadFactory,
8147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8148{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008149 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8150 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008151}
8152
8153LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8154 armnn::IWorkloadFactory& workloadFactory,
8155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8156{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008157 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8158 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008159}
8160
8161LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8162 armnn::IWorkloadFactory& workloadFactory,
8163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8164{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008165 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8166 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008167}
8168
8169LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8170 armnn::IWorkloadFactory& workloadFactory,
8171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8172{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008173 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8174 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008175}
8176
8177LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8178 armnn::IWorkloadFactory& workloadFactory,
8179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8180 bool useSubtensor)
8181{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008182 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8183 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008184}
8185
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008186LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8187 armnn::IWorkloadFactory& workloadFactory,
8188 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8189 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008190{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008191 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8192 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008193}
8194
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008195LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8196 armnn::IWorkloadFactory& workloadFactory,
8197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8198 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008199{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008200 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008201 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008202}
8203
Teresa Charlin0434df62019-06-06 13:40:35 +01008204LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
8205 armnn::IWorkloadFactory& workloadFactory,
8206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8207 bool forceNoPadding)
8208{
8209 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
8210 workloadFactory, memoryManager, forceNoPadding);
8211}
8212
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008213LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8214 armnn::IWorkloadFactory& workloadFactory,
8215 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8216 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008217{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008218 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8219 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008220}
8221
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008222LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8223 armnn::IWorkloadFactory& workloadFactory,
8224 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8225 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008226{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008227 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008228 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008229}
8230
Teresa Charlin0434df62019-06-06 13:40:35 +01008231LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
8232 armnn::IWorkloadFactory& workloadFactory,
8233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8234 bool forceNoPadding)
8235{
8236 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
8237 workloadFactory, memoryManager, forceNoPadding);
8238}
8239
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008240LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8241 armnn::IWorkloadFactory& workloadFactory,
8242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008243 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008244{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008245 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008246}
8247
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008248LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8249 armnn::IWorkloadFactory& workloadFactory,
8250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008251 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008252{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008253 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008254}
8255
Teresa Charlin0434df62019-06-06 13:40:35 +01008256LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
8257 armnn::IWorkloadFactory& workloadFactory,
8258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8259 const armnn::DataLayout dataLayout)
8260{
8261 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8262}
8263LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8264 armnn::IWorkloadFactory& workloadFactory,
8265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8266{
8267 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8268}
8269
8270LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8271 armnn::IWorkloadFactory& workloadFactory,
8272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8273{
8274 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8275 workloadFactory, memoryManager, 1.0f, -5);
8276}
8277
8278LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
8279 armnn::IWorkloadFactory& workloadFactory,
8280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8281{
8282 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8283 workloadFactory, memoryManager);
8284}
8285
8286LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8287 armnn::IWorkloadFactory& workloadFactory,
8288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8289{
8290 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8291}
8292
8293LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8294 armnn::IWorkloadFactory& workloadFactory,
8295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8296{
8297 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8298 workloadFactory, memoryManager, 1.0f, -5);
8299}
8300
8301LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
8302 armnn::IWorkloadFactory& workloadFactory,
8303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8304{
8305 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8306 workloadFactory, memoryManager);
8307}
8308
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008309LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8310 armnn::IWorkloadFactory& workloadFactory,
8311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008312 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008313{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008314 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008315}
8316
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008317LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8318 armnn::IWorkloadFactory& workloadFactory,
8319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008320 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008321{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008322 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008323 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008324}
8325
Teresa Charlin0434df62019-06-06 13:40:35 +01008326LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
8327 armnn::IWorkloadFactory& workloadFactory,
8328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8329 const armnn::DataLayout dataLayout)
8330{
8331 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8332 workloadFactory, memoryManager, dataLayout);
8333}
8334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008335LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8336 armnn::IWorkloadFactory& workloadFactory,
8337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8338 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008339{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008340 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008341 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008342}
8343
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008344LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8345 armnn::IWorkloadFactory& workloadFactory,
8346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008348 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008349}
8350
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008351LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8352 armnn::IWorkloadFactory& workloadFactory,
8353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008354{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008355 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8356 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008357}
8358
Teresa Charlin0434df62019-06-06 13:40:35 +01008359LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
8360 armnn::IWorkloadFactory& workloadFactory,
8361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8362{
8363 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8364 workloadFactory, memoryManager);
8365}
8366LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8367 armnn::IWorkloadFactory& workloadFactory,
8368 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8369{
8370 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8371}
8372
8373LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8374 armnn::IWorkloadFactory& workloadFactory,
8375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8376{
8377 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8378 workloadFactory, memoryManager);
8379}
8380
8381LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
8382 armnn::IWorkloadFactory& workloadFactory,
8383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8384{
8385 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8386 workloadFactory, memoryManager);
8387}
8388
8389LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8390 armnn::IWorkloadFactory& workloadFactory,
8391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8392{
8393 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8394 workloadFactory, memoryManager);
8395}
8396
8397LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
8398 armnn::IWorkloadFactory& workloadFactory,
8399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8400{
8401 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8402 workloadFactory, memoryManager);
8403}
8404
8405LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
8406 armnn::IWorkloadFactory& workloadFactory,
8407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8408{
8409 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
8410 workloadFactory, memoryManager);
8411}
8412
8413LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8414 armnn::IWorkloadFactory& workloadFactory,
8415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8416{
8417 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8418}
8419
8420LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8421 armnn::IWorkloadFactory& workloadFactory,
8422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8423{
8424 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8425 workloadFactory, memoryManager);
8426}
8427
8428LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
8429 armnn::IWorkloadFactory& workloadFactory,
8430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8431{
8432 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8433 workloadFactory, memoryManager);
8434}
8435
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008436LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8437 armnn::IWorkloadFactory& workloadFactory,
8438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008439 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008441 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008442}
8443
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008444LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8445 armnn::IWorkloadFactory& workloadFactory,
8446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008447 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008448{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008449 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008450}
8451
Teresa Charlin0434df62019-06-06 13:40:35 +01008452LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
8453 armnn::IWorkloadFactory& workloadFactory,
8454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8455 const armnn::DataLayout dataLayout)
8456{
8457 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8458}
8459
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008460LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8461 armnn::IWorkloadFactory& workloadFactory,
8462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008463{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008464 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008465}
8466
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008467LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8468 armnn::IWorkloadFactory& workloadFactory,
8469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008470{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008471 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008472}
8473
Teresa Charlin0434df62019-06-06 13:40:35 +01008474LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
8475 armnn::IWorkloadFactory& workloadFactory,
8476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8477{
8478 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8479}
8480
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008481LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8482 armnn::IWorkloadFactory& workloadFactory,
8483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008484{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008485 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008486}
8487
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008488LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8489 armnn::IWorkloadFactory& workloadFactory,
8490 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008491{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008492 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008493}
8494
Teresa Charlin0434df62019-06-06 13:40:35 +01008495LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
8496 armnn::IWorkloadFactory& workloadFactory,
8497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8498{
8499 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8500}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008501LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8502 armnn::IWorkloadFactory& workloadFactory,
8503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008504{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008505 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008506}
8507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008508LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8509 armnn::IWorkloadFactory& workloadFactory,
8510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008511{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008512 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008513}
8514
Teresa Charlin0434df62019-06-06 13:40:35 +01008515LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
8516 armnn::IWorkloadFactory& workloadFactory,
8517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8518{
8519 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8520}
8521
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008522LayerTestResult<float, 4> L2Pooling2dSize7Test(
8523 armnn::IWorkloadFactory& workloadFactory,
8524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008525{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008526 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008527}
8528
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008529LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8530 armnn::IWorkloadFactory& workloadFactory,
8531 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008532{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008533 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008534}
8535
Teresa Charlin0434df62019-06-06 13:40:35 +01008536LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
8537 armnn::IWorkloadFactory& workloadFactory,
8538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8539{
8540 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8541}
8542
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008543LayerTestResult<float, 4> L2Pooling2dSize9Test(
8544 armnn::IWorkloadFactory& workloadFactory,
8545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008546{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008547 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008548}
8549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008550LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8551 armnn::IWorkloadFactory& workloadFactory,
8552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008553{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008554 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008555}
8556
Teresa Charlin0434df62019-06-06 13:40:35 +01008557LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
8558 armnn::IWorkloadFactory& workloadFactory,
8559 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8560{
8561 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8562}
8563LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8564 armnn::IWorkloadFactory& workloadFactory,
8565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8566{
8567 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8568}
8569
8570LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8571 armnn::IWorkloadFactory& workloadFactory,
8572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8573{
8574 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
8575}
8576
8577LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
8578 armnn::IWorkloadFactory& workloadFactory,
8579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8580{
8581 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8582}
8583
8584LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8585 armnn::IWorkloadFactory& workloadFactory,
8586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8587{
8588 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8589}
8590
8591LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8592 armnn::IWorkloadFactory& workloadFactory,
8593 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8594{
8595 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
8596}
8597
8598LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
8599 armnn::IWorkloadFactory& workloadFactory,
8600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8601{
8602 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8603}
8604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008605LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8606 armnn::IWorkloadFactory& workloadFactory,
8607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008608{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008609 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008610}
8611
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008612LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8613 armnn::IWorkloadFactory& workloadFactory,
8614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008615{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008616 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008617}
8618
Teresa Charlin0434df62019-06-06 13:40:35 +01008619LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
8620 armnn::IWorkloadFactory& workloadFactory,
8621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8622{
8623 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8624}
8625
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008626LayerTestResult<float, 4> ComparePooling2dTest(
8627 armnn::IWorkloadFactory& workloadFactory,
8628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8629 armnn::IWorkloadFactory& refWorkloadFactory,
8630 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008631{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008632 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008633 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008634}
8635
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008636LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8637 armnn::IWorkloadFactory& workloadFactory,
8638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8639 armnn::IWorkloadFactory& refWorkloadFactory,
8640 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008641{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008642 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008643 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008644}
8645
Teresa Charlin0434df62019-06-06 13:40:35 +01008646LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
8647 armnn::IWorkloadFactory& workloadFactory,
8648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8649 armnn::IWorkloadFactory& refWorkloadFactory,
8650 armnn::PoolingAlgorithm poolingType)
8651{
8652 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8653 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
8654}
8655
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008656LayerTestResult<float, 2> FullyConnectedLargeTest(
8657 armnn::IWorkloadFactory& workloadFactory,
8658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8659 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008660{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008661 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008662}
8663
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008664LayerTestResult<float, 4> SimplePermuteFloat32Test(
8665 armnn::IWorkloadFactory& workloadFactory,
8666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008667{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008668 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008669};
8670
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008671LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8672 armnn::IWorkloadFactory& workloadFactory,
8673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008674{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008675 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008676};
surmeh01bceff2f2018-03-29 16:29:27 +01008677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008678LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8679 armnn::IWorkloadFactory& workloadFactory,
8680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008681{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008682 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008683};
8684
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008685LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8686 armnn::IWorkloadFactory& workloadFactory,
8687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008688{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008689 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008690};
8691
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008692LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8693 armnn::IWorkloadFactory& workloadFactory,
8694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008695{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008696 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008697};
8698
// Regression test: chains two workloads so that the output ITensorHandle of a
// MaxPool workload feeds an Addition workload, and checks the final sum.
// Pipeline: 3x3 input -> MaxPool (1x1 pool, stride 2x2) -> 2x2 result, which
// is then added element-wise to a constant 2x2 tensor.
// NOTE(review): `memoryManager` is accepted but never used in this body.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    // NCHW layout: 1 batch, 1 channel, 3x3 spatial in, 2x2 spatial out.
    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer sized like the pooling output, used for the handle
    // round-trip below.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    // (max-pooled {1,3,7,9} + {12,16,24,28} element-wise)
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // (poolingOutputHandle is intentionally wired as both the MaxPool output
    // and the first Addition input.)
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle BEFORE workload->Execute()
    // has run, so resultMaxPool receives whatever the freshly-allocated handle
    // contains, and the next line copies it straight back — a round-trip that
    // looks like a no-op. Presumably left over from an earlier version of the
    // test; confirm intent before relying on resultMaxPool's contents.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute the MaxPool first, then the Addition that consumes its output.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008803
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008804LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
8805 armnn::IWorkloadFactory& workloadFactory,
8806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008807{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008808 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008809}
8810
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008811LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
8812 armnn::IWorkloadFactory& workloadFactory,
8813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008814{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008815 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008816}
8817
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008818LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
8819 armnn::IWorkloadFactory& workloadFactory,
8820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008821{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008822 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008823}
8824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008825LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
8826 armnn::IWorkloadFactory& workloadFactory,
8827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008828{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008829 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008830}
8831
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008832LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
8833 armnn::IWorkloadFactory& workloadFactory,
8834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008835{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008836 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008837}
8838
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008839LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
8840 armnn::IWorkloadFactory& workloadFactory,
8841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008842{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008843 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008844}
8845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008846LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
8847 armnn::IWorkloadFactory& workloadFactory,
8848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008849{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008850 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008851}
8852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008853LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
8854 armnn::IWorkloadFactory& workloadFactory,
8855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008856{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008857 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008858}
8859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008860LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
8861 armnn::IWorkloadFactory& workloadFactory,
8862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008863{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008864 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008865}
8866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008867LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
8868 armnn::IWorkloadFactory& workloadFactory,
8869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008870{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008871 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008872}
8873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008874LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
8875 armnn::IWorkloadFactory& workloadFactory,
8876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008877{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008878 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008879}
8880
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008881LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
8882 armnn::IWorkloadFactory& workloadFactory,
8883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008884{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008885 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008886}
8887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008888LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
8889 armnn::IWorkloadFactory& workloadFactory,
8890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008891{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008892 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008893}
8894
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008895LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
8896 armnn::IWorkloadFactory& workloadFactory,
8897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008898{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008899 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008900}
8901
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008902LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
8903 armnn::IWorkloadFactory& workloadFactory,
8904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008905{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008906 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008907}
8908
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008909LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
8910 armnn::IWorkloadFactory& workloadFactory,
8911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008912{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008913 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00008914}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008915
nikraj01120522a2019-05-31 11:33:07 +01008916LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
8917 armnn::IWorkloadFactory& workloadFactory,
8918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8919{
8920 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8921}
8922
8923LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
8924 armnn::IWorkloadFactory& workloadFactory,
8925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8926{
8927 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8928}
8929
8930LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
8931 armnn::IWorkloadFactory& workloadFactory,
8932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8933{
8934 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8935}
8936
8937LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
8938 armnn::IWorkloadFactory& workloadFactory,
8939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8940{
8941 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8942}
8943
8944LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
8945 armnn::IWorkloadFactory& workloadFactory,
8946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8947{
8948 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8949}
8950
8951LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
8952 armnn::IWorkloadFactory& workloadFactory,
8953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8954{
8955 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8956}
8957
8958LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
8959 armnn::IWorkloadFactory& workloadFactory,
8960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8961{
8962 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8963}
8964
8965LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
8966 armnn::IWorkloadFactory& workloadFactory,
8967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8968{
8969 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8970}
8971
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008972namespace {
8973
8974template<typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008975LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
8976 armnn::IWorkloadFactory &workloadFactory,
8977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8978 const armnn::DataLayout& dataLayout,
8979 const unsigned int *inputShape,
8980 const std::vector<T> &inputData,
8981 const std::vector<unsigned int> &blockShape,
8982 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
8983 const unsigned int *outputShape,
8984 const std::vector<T> &outputData,
8985 float scale = 1.0f,
8986 int32_t offset = 0)
Derek Lambertif30f7d32019-04-09 10:25:02 +01008987{
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00008988 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
8989
8990 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
8991 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
8992
8993 inputTensorInfo.SetQuantizationScale(scale);
8994 inputTensorInfo.SetQuantizationOffset(offset);
8995
8996 outputTensorInfo.SetQuantizationScale(scale);
8997 outputTensorInfo.SetQuantizationOffset(offset);
8998
8999 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
9000
9001 LayerTestResult<T, OutputDim> result(outputTensorInfo);
9002 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
9003
9004 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
9005 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
9006
9007 armnn::BatchToSpaceNdQueueDescriptor data;
9008 data.m_Parameters.m_DataLayout = dataLayout;
9009 data.m_Parameters.m_BlockShape = blockShape;
9010 data.m_Parameters.m_Crops = crops;
9011 armnn::WorkloadInfo info;
9012 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
9013 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
9014
9015 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
9016
9017 inputHandle->Allocate();
9018 outputHandle->Allocate();
9019
9020 CopyDataToITensorHandle(inputHandle.get(), input.origin());
9021
Derek Lambertif30f7d32019-04-09 10:25:02 +01009022 workload->PostAllocationConfigure();
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009023 workload->Execute();
9024
9025 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
9026
9027 return result;
9028}
9029
9030} // anonymous namespace
9031
/// BatchToSpaceNd, NHWC, Float32: block shape {2,2}, no cropping.
/// Interleaves four 2x2 input batches back into a single 4x4 image.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // Output is the 4x4 image reassembled in row-major order.
    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9077
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009078LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
9079 armnn::IWorkloadFactory& workloadFactory,
9080 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009081{
9082 const unsigned int inputShape[] = {4, 1, 1, 1};
9083 const unsigned int outputShape[] = {1, 2, 2, 1};
9084
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009085 std::vector<float> input({
9086 // Batch 0, Height 0, Width (2) x Channel (1)
9087 1.0f, 2.0f, 3.0f, 4.0f
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009088 });
9089
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009090 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009091
9092 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009093 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009094
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009095 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9096 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9097 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009098}
9099
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009100LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
9101 armnn::IWorkloadFactory& workloadFactory,
9102 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009103{
9104 const unsigned int inputShape[] = {4, 1, 1, 3};
9105 const unsigned int outputShape[] = {1, 2, 2, 3};
9106
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009107 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009108
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009109 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009110
9111 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009112 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009114 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9115 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9116 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009117}
9118
/// BatchToSpaceNd, NHWC, Float32: block shape {2,2} with width cropping {2,0}.
/// The leading zero in each input row is removed by the crop.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    // Each row starts with a padding zero that the width crop discards.
    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    // Crop 2 elements from the start of the width dimension, none elsewhere.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9151
/// BatchToSpaceNd, NCHW, Float32: four 3-channel 1x1 batches -> one 3x2x2 image.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Input is [batch][channel]; e.g. batch 0 holds channels {1,2,3}.
    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009182
Mike Kelly831faed2018-11-28 11:52:08 +00009183LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009184 armnn::IWorkloadFactory& workloadFactory,
9185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009186{
9187 const unsigned int inputShape[] = {4, 1, 1, 1};
9188 const unsigned int outputShape[] = {1, 1, 2, 2};
9189
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009190 std::vector<float> input({
9191 // Batch 0, Height 0, Width (2) x Channel (1)
9192 1.0f, 2.0f, 3.0f, 4.0f
9193 });
Mike Kelly831faed2018-11-28 11:52:08 +00009194
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009195 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009196
9197 std::vector<unsigned int> blockShape({2, 2});
9198 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9199
9200 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9201 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9202 crops, outputShape, expectedOutput);
9203}
9204
/// BatchToSpaceNd, NCHW, Float32: channel-major input ordering variant of Test1.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batches hold {1,3,5},{7,9,11},{2,4,6},{8,10,12} across their 3 channels.
    std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 7.0f,
        2.0f, 8.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3.0f, 9.0f,
        4.0f, 10.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5.0f, 11.0f,
        6.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009236LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9237 armnn::IWorkloadFactory& workloadFactory,
9238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009239{
9240 const unsigned int inputShape[] = {4, 2, 2, 1};
9241 const unsigned int outputShape[] = {1, 4, 4, 1};
9242
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009243 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9244 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009245
9246 std::vector<unsigned int> blockShape({2, 2});
9247 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9248
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009249 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9250 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009251}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009252
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009253LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
9254 armnn::IWorkloadFactory& workloadFactory,
9255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9256{
9257 const unsigned int inputShape[] = {4, 1, 1, 1};
9258 const unsigned int outputShape[] = {1, 2, 2, 1};
9259
9260 std::vector<uint8_t> input({
9261 // Batch 0, Height 0, Width (2) x Channel (1)
9262 1, 2, 3, 4
9263 });
9264
9265 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9266
9267 std::vector<unsigned int> blockShape({2, 2});
9268 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9269
9270 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9271 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9272 crops, outputShape, expectedOutput);
9273}
9274
9275LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9276 armnn::IWorkloadFactory& workloadFactory,
9277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9278{
9279 const unsigned int inputShape[] = {4, 1, 1, 3};
9280 const unsigned int outputShape[] = {1, 2, 2, 3};
9281
9282 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9283
9284 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9285
9286 std::vector<unsigned int> blockShape({2, 2});
9287 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9288
9289 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9290 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9291 crops, outputShape, expectedOutput);
9292}
9293
9294
/// BatchToSpaceNd, NCHW, uint8: four 3-channel 1x1 batches -> one 3x2x2 image.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Input is [batch][channel]; e.g. batch 0 holds channels {1,2,3}.
    std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});

    std::vector<uint8_t> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 4,
        7, 10,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2, 5,
        8, 11,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3, 6,
        9, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9325
9326LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
9327 armnn::IWorkloadFactory& workloadFactory,
9328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9329{
9330 const unsigned int inputShape[] = {4, 1, 1, 1};
9331 const unsigned int outputShape[] = {1, 1, 2, 2};
9332
9333 std::vector<uint8_t> input({
9334 // Batch 0, Height 0, Width (2) x Channel (1)
9335 1, 2, 3, 4
9336 });
9337
9338 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9339
9340 std::vector<unsigned int> blockShape({2, 2});
9341 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9342
9343 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9344 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9345 crops, outputShape, expectedOutput);
9346}
9347
/// BatchToSpaceNd, NCHW, uint8: channel-major input ordering variant of UintTest1.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batches hold {1,3,5},{7,9,11},{2,4,6},{8,10,12} across their 3 channels.
    std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});

    std::vector<uint8_t> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 7,
        2, 8,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3, 9,
        4, 10,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5, 11,
        6, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9378
/// BatchToSpaceNd, NCHW, uint8: block shape {2,2} with width cropping {2,0}.
/// The padding zeros at the start of each input row are removed by the crop.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 1, 3};
    const unsigned int outputShape[] = {2, 1, 2, 4};

    // Each 3-wide row starts with a padding zero that the width crop discards.
    std::vector<uint8_t> input({
        0, 1, 3, 0, 9, 11,
        0, 2, 4, 0, 10, 12,
        0, 5, 7, 0, 13, 15,
        0, 6, 8, 0, 14, 16
    });

    std::vector<uint8_t> expectedOutput({
        1, 2, 3, 4,
        5, 6, 7, 8,
        9, 10, 11, 12,
        13, 14, 15, 16
    });

    std::vector<unsigned int> blockShape({2, 2});
    // Crop 2 elements from the start of the width dimension, none elsewhere.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9407
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009408LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9409 armnn::IWorkloadFactory& workloadFactory,
9410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9411{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009412 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009413}
9414
9415LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9416 armnn::IWorkloadFactory& workloadFactory,
9417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9418{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009419 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009420}
9421
9422LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9423 armnn::IWorkloadFactory& workloadFactory,
9424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9425{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009426 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009427}
9428
9429LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9430 armnn::IWorkloadFactory& workloadFactory,
9431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9432{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009433 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009434}
9435
9436LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9437 armnn::IWorkloadFactory& workloadFactory,
9438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009440 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009441}
9442
9443LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9444 armnn::IWorkloadFactory& workloadFactory,
9445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9446{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009447 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009448}
9449
9450LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9451 armnn::IWorkloadFactory& workloadFactory,
9452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9453{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009454 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009455}
9456
9457LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9458 armnn::IWorkloadFactory& workloadFactory,
9459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9460{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009461 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009462}
9463
9464LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9465 armnn::IWorkloadFactory& workloadFactory,
9466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9467{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009468 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009469}
9470
9471LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9472 armnn::IWorkloadFactory& workloadFactory,
9473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9474{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009475 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009476}
9477
9478LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9479 armnn::IWorkloadFactory& workloadFactory,
9480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9481{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009482 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009483}
9484
9485LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9486 armnn::IWorkloadFactory& workloadFactory,
9487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9488{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009489 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009490}
9491
9492LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9493 armnn::IWorkloadFactory& workloadFactory,
9494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9495{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009496 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009497}
9498
9499LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9500 armnn::IWorkloadFactory& workloadFactory,
9501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9502{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009503 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009504}
9505
9506LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9507 armnn::IWorkloadFactory& workloadFactory,
9508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9509{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009510 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009511}
9512
9513LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9514 armnn::IWorkloadFactory& workloadFactory,
9515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009517 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009518}
9519
9520LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9521 armnn::IWorkloadFactory& workloadFactory,
9522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9523{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009524 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009525}
9526
9527LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9528 armnn::IWorkloadFactory& workloadFactory,
9529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9530{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009531 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009532}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009533
Matteo Martincigh42666a12019-05-29 08:53:41 +01009534LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9535 armnn::IWorkloadFactory& workloadFactory,
9536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9537{
9538 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9539}
9540
9541LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9542 armnn::IWorkloadFactory& workloadFactory,
9543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9544{
9545 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9546}
9547
9548LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9549 armnn::IWorkloadFactory& workloadFactory,
9550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9551{
9552 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9553}
9554
9555LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9556 armnn::IWorkloadFactory& workloadFactory,
9557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9558{
9559 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9560}
9561
9562LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9563 armnn::IWorkloadFactory& workloadFactory,
9564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9565{
9566 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9567}
9568
9569LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9570 armnn::IWorkloadFactory& workloadFactory,
9571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9572{
9573 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9574}
9575
9576LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9577 armnn::IWorkloadFactory& workloadFactory,
9578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9579{
9580 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9581}
9582
9583LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9584 armnn::IWorkloadFactory& workloadFactory,
9585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9586{
9587 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9588}
9589
9590LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9591 armnn::IWorkloadFactory& workloadFactory,
9592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9593{
9594 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9595}
9596
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009597LayerTestResult<float, 4> Debug4DFloat32Test(
9598 armnn::IWorkloadFactory& workloadFactory,
9599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9600{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009601 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009602}
9603
9604LayerTestResult<float, 3> Debug3DFloat32Test(
9605 armnn::IWorkloadFactory& workloadFactory,
9606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9607{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009608 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009609}
9610
9611LayerTestResult<float, 2> Debug2DFloat32Test(
9612 armnn::IWorkloadFactory& workloadFactory,
9613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9614{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009615 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009616}
9617
9618LayerTestResult<float, 1> Debug1DFloat32Test(
9619 armnn::IWorkloadFactory& workloadFactory,
9620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9621{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009622 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009623}
9624
9625LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9626 armnn::IWorkloadFactory& workloadFactory,
9627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009629 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009630}
9631
9632LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9633 armnn::IWorkloadFactory& workloadFactory,
9634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009636 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009637}
9638
9639LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9640 armnn::IWorkloadFactory& workloadFactory,
9641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009643 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009644}
9645
9646LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9647 armnn::IWorkloadFactory& workloadFactory,
9648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009650 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009651}
Matteo Martincigh49124022019-01-11 13:25:59 +00009652
narpra014951d842019-01-18 16:53:53 +00009653LayerTestResult<float, 1> Gather1DParamsFloatTest(
9654 armnn::IWorkloadFactory& workloadFactory,
9655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9656{
9657 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9658}
9659
9660LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9661 armnn::IWorkloadFactory& workloadFactory,
9662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9663{
9664 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9665}
9666
9667LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9668 armnn::IWorkloadFactory& workloadFactory,
9669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9670{
9671 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9672}
9673
9674LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9675 armnn::IWorkloadFactory& workloadFactory,
9676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9677{
9678 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9679}
9680
9681LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9682 armnn::IWorkloadFactory& workloadFactory,
9683 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9684{
9685 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9686}
9687
9688LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9689 armnn::IWorkloadFactory& workloadFactory,
9690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9691{
9692 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9693 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009694}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009695
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009696LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009697 armnn::IWorkloadFactory& workloadFactory,
9698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9699{
9700 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9701}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009702
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009703LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9704 armnn::IWorkloadFactory& workloadFactory,
9705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9706{
9707 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9708}
9709
9710LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9711 armnn::IWorkloadFactory& workloadFactory,
9712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9713{
9714 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9715}
9716
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009717LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9718 armnn::IWorkloadFactory& workloadFactory,
9719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9720{
9721 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9722}
9723
9724LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9725 armnn::IWorkloadFactory& workloadFactory,
9726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9727{
9728 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9729}
9730
9731LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9732 armnn::IWorkloadFactory& workloadFactory,
9733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9734{
9735 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9736}