blob: a625097fdb23bd8194f7ddde031115af8a812e95 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Stored channel-major (CHW): channel 0 is mostly 0.5 with one all-zero row,
// channel 1 has a single vertical line of 1s at column 2, channel 2 is all -1.
static std::vector<float> ConvInput3x8x16({
    // Channel 0: eight rows of 0.5, except row 1 which is all zeros.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: a vertical line of 1s at column 2, zeros elsewhere.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: uniformly -1.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests.
// Values are kept un-quantised here; GetBias2 quantises them on demand.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Shared implementation for the 3x5-kernel Conv2d tests: convolves the common
// 3-channel 16x8 input with a 2-element batch of 3-channel 3x5 kernels and
// checks the result against a precomputed expected image.
//
// Template parameters:
//   ArmnnType  - data type of the input/kernel/output tensors.
//   ArmnnBType - data type of the (optional) bias tensor.
// Parameters:
//   qScale, qOffset - quantisation parameters applied to all tensor data.
//   biasEnabled     - when true, the 2-channel Bias2 values are added.
//   layout          - data layout under test (NCHW or NHWC).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0, channel 0 (5x3): all ones except a -1 at the centre of row 1.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            // Filter 0, channel 1: zero (ignores the vertical-line channel).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            // Filter 0, channel 2: doubles the all -1 channel.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Filter 1, channel 0: zero.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            // Filter 1, channel 1: passes through the vertical-line channel.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            // Filter 1, channel 2: zero.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of the input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Shared implementation for the 3x3-kernel Conv2d tests. Same structure as the
// 3x5 variant, but with a 3x3 kernel, which exercises ArmCompute's direct
// convolution path.
//
// Template parameters:
//   ArmnnType  - data type of the input/kernel/output tensors.
//   ArmnnBType - data type of the (optional) bias tensor.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0: channels 0/1/2 weighted 1 (centre -1) / 0 / 2 respectively.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Filter 1: only channel 1 (the vertical-line channel) contributes.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of the input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
// Shared implementation for the strided 3x3 Conv2d test: a 1-channel 5x5 image
// convolved with a 3x3 kernel, stride 2x2, one pixel of padding on every edge.
// NOTE(review): shapes are written NHWC-style ({N, H, W, C}); confirm behaviour
// when dataLayout is NCHW.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image:
    // (5 - 3 + 2*1)/2 + 1 = 3 in each spatial dimension.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // One pixel of padding on every edge; stride 2 in both directions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    // NOTE(review): biasEnabled is not used here — an empty bias tensor is
    // always passed; confirm whether a biased variant was intended.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
// Shared implementation for the Conv2d test whose asymmetric padding exceeds
// half the kernel size (pads of 1/2/3/4 around a 2x2 kernel on a 3x3 input).
// Always runs without a bias.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 8x6 image (8 rows x 6 columns,
// from 3x3 input + 2+4 rows / 1+3 columns of padding, 2x2 kernel, stride 1).
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias disabled; the scale argument follows the input*kernel convention.
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
Teresa Charlinedeeb162019-06-14 11:09:19 +0100586LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
587 armnn::IWorkloadFactory& workloadFactory,
588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
589 armnn::DataLayout layout)
590{
591 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
592 workloadFactory, memoryManager, layout, 0.0f, 0);
593}
594
595LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
596 armnn::IWorkloadFactory& workloadFactory,
597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
598 armnn::DataLayout layout)
599{
600 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
601 <armnn::DataType::Float32, armnn::DataType::Float32>(
602 workloadFactory, memoryManager, layout, 0.0f, 0);
603}
604
605LayerTestResult<float, 4> Convolution1dTest(
606 armnn::IWorkloadFactory& workloadFactory,
607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
608 bool biasEnabled)
609{
610 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
611 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
612}
613
614LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
615 armnn::IWorkloadFactory& workloadFactory,
616 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
617 bool biasEnabled)
618{
619 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
620 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
621}
622
623LayerTestResult<float,4> CompareConvolution2dTest(
624 armnn::IWorkloadFactory& workloadFactory,
625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
626 armnn::IWorkloadFactory& refWorkloadFactory)
627{
628 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
629 workloadFactory, memoryManager, refWorkloadFactory);
630}
631
// Shared implementation for the dilated-convolution tests. Callers supply the
// un-quantised input/kernel/expected-output values together with their tensor
// infos; this function picks per-type quantisation parameters, quantises the
// data, and runs an unpadded stride-1 convolution with the given dilations.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    bool biasEnabled = false
)
{
    // Select quantisation parameters appropriate to the data type under test.
    // ArmnnType is a template (compile-time) value, so only one branch is live
    // per instantiation.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Apply the same quantisation info to all three tensors.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantise the caller-supplied float data into tensors of type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    // No padding, unit strides — only the dilation factors vary in these tests.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of the input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
719
720template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
721LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
722 armnn::IWorkloadFactory& workloadFactory,
723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
724 bool biasEnabled,
725 const armnn::DataLayout layout)
726{
727 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
728 std::vector<float> inputNoQuantizedValues =
729 {
730 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
731 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
732 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
733 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
734 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
735 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
736 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
737 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
738 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
739 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
740 };
741
742 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
743 std::vector<float> kernelNoQuantizedValues =
744 {
745 1, 2, 3,
746 4, 5, 6,
747 7, 8, 9
748 };
749
750 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
751 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
752 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
753 std::vector<float> outputExpectedNoQuantizedValues =
754 {
755 6., 5., 5., 5.,
756 6., 5., 5., 5.,
757 6., 5., 5., 5.,
758 3., 2., 2., 2.
759 };
760
761 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
762 workloadFactory,
763 memoryManager,
764 inputNoQuantizedValues,
765 inputTensorInfo,
766 kernelNoQuantizedValues,
767 kernelTensorInfo,
768 outputExpectedNoQuantizedValues,
769 outputTensorInfo,
770 3,
771 3,
772 layout,
773 biasEnabled);
774}
775
// As Convolution2d3x3Dilation3x3Test, but with a 2-channel input and a
// {1, 2, 3, 3} kernel (one output channel over two input channels). Both
// input channels hold the same pattern, so each expected value is double
// the single-channel result (12/10/6/4 instead of 6/5/3/2).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 2 channels, 10x10; each channel is zero except a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Same 1..9 kernel repeated for each of the two input channels.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
846
//
// Explicit template instantiations of Convolution2d3x3Dilation3x3Test and
// Convolution2d2x3x3Dilation3x3Test for the data types exercised by the
// backend unit tests. The quantised variants accumulate into Signed32 biases.
//
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
888
// Common implementation for DepthwiseConvolution2dAsymmetricTest: exercises
// asymmetric padding (left=1, top=1, right=2, bottom=2) with a 4x4 kernel so
// that the 5x5 input maps onto a 5x5 output. qScale/qOffset are forwarded
// unchanged to the test implementation.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias variance is qScale^2 because the bias scale is inputScale * kernelScale.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
969
// NHWC variant of the asymmetric-padding depthwise convolution test. The
// data tables are identical to DepthwiseConvolution2dAsymmetricTestCommon.
// NOTE(review): the TensorInfos here are NCHW-shaped ({1, 2, 5, 5}) while
// layout is NHWC; DepthwiseConvolution2dTestImpl is presumably responsible
// for the permutation - confirm against its implementation.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    // Single batch, 2 channels, 5x5 input holding the values 0..49.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // 2-channel 4x4 kernel holding the values 32..1.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Same reference output values as the NCHW asymmetric test above.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1047
Bruno Goncalves22972f02019-04-26 21:03:24 -03001048template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1049 typename T = armnn::ResolveType<ArmnnType>>
1050LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1051 armnn::IWorkloadFactory& workloadFactory,
1052 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1053 float qScale,
1054 int32_t qOffset,
1055 bool biasEnabled)
1056{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001057 auto layout = armnn::DataLayout::NHWC;
1058
1059 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001060 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001061 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1062 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001063 0, 0, 0, 0, 0, 0, 0, 0, 0,
1064 0, 0, 0, 0, 0, 0, 0, 0, 0,
1065 0, 0, 0, 0, 0, 0, 0, 0, 0,
1066 0, 0, 0, 1, 1, 1, 0, 0, 0,
1067 0, 0, 0, 1, 1, 1, 0, 0, 0,
1068 0, 0, 0, 1, 1, 1, 0, 0, 0,
1069 0, 0, 0, 0, 0, 0, 0, 0, 0,
1070 0, 0, 0, 0, 0, 0, 0, 0, 0,
1071 0, 0, 0, 0, 0, 0, 0, 0, 0
1072 })));
1073
1074 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1075 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001076 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1077 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001078 1, 2, 3,
1079 4, 5, 6,
1080 7, 8, 9
1081 })));
1082
1083 uint32_t padLeft = 0;
1084 uint32_t padTop = 0;
1085 uint32_t padRight = 0;
1086 uint32_t padBottom = 0;
1087 uint32_t strideX = 1;
1088 uint32_t strideY = 1;
1089 uint32_t dilationX = 3;
1090 uint32_t dilationY = 3;
1091
1092 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Teresa Charlin20b1f882019-06-19 09:34:37 +01001093 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001094 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001095 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1096 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001097 5, 5, 5,
1098 5, 5, 5,
1099 5, 5, 5
1100 })));
1101
Teresa Charlin20b1f882019-06-19 09:34:37 +01001102 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Bruno Goncalves22972f02019-04-26 21:03:24 -03001103 workloadFactory,
1104 memoryManager,
1105 input,
1106 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001107 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001108 expectedOutput,
1109 qScale,
1110 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001111 layout,
Bruno Goncalves22972f02019-04-26 21:03:24 -03001112 padLeft,
1113 padTop,
1114 padRight,
1115 padBottom,
1116 strideX,
1117 strideY,
1118 dilationX,
1119 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001120}
1121
Teresa Charlin20b1f882019-06-19 09:34:37 +01001122
1123template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1124LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1125 armnn::IWorkloadFactory& workloadFactory,
1126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1127 const std::vector<float>& inputNoQuantizedValues,
1128 armnn::TensorInfo& inputTensorInfo,
1129 const std::vector<float>& kernelNoQuantizedValues,
1130 armnn::TensorInfo& kernelTensorInfo,
1131 const std::vector<float>& outputExpectedNoQuantizedValues,
1132 armnn::TensorInfo& outputTensorInfo,
1133 uint32_t dilationX,
1134 uint32_t dilationY,
1135 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1136 bool biasEnabled = false)
1137{
1138 float qScale;
1139 int32_t qOffset;
1140 switch (ArmnnType)
1141 {
1142 case armnn::DataType::QuantisedAsymm8:
1143 {
1144 qScale = 0.1f;
1145 qOffset = 128;
1146 break;
1147 }
1148 case armnn::DataType::QuantisedSymm16:
1149 {
1150 qScale = 0.1f;
1151 qOffset = 0;
1152 break;
1153 }
1154 case armnn::DataType::Float32:
1155 default:
1156 {
1157 qScale = 0.f;
1158 qOffset = 0;
1159 break;
1160 }
1161 }
1162
1163 inputTensorInfo.SetQuantizationScale(qScale);
1164 inputTensorInfo.SetQuantizationOffset(qOffset);
1165 kernelTensorInfo.SetQuantizationScale(qScale);
1166 kernelTensorInfo.SetQuantizationOffset(qOffset);
1167 outputTensorInfo.SetQuantizationScale(qScale);
1168 outputTensorInfo.SetQuantizationOffset(qOffset);
1169
1170 auto input = MakeTensor<T, 4>(inputTensorInfo,
1171 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1172 inputTensorInfo.GetQuantizationOffset(),
1173 inputNoQuantizedValues)));
1174 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1175 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1176 kernelTensorInfo.GetQuantizationOffset(),
1177 kernelNoQuantizedValues)));
1178 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1179 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1180 outputTensorInfo.GetQuantizationOffset(),
1181 outputExpectedNoQuantizedValues)));
1182
1183 uint32_t padLeft = 0;
1184 uint32_t padTop = 0;
1185 uint32_t padRight = 0;
1186 uint32_t padBottom = 0;
1187 uint32_t strideX = 1;
1188 uint32_t strideY = 1;
1189
1190 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1191 workloadFactory,
1192 memoryManager,
1193 input,
1194 kernel,
1195 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1196 expectedOutput,
1197 qScale,
1198 qOffset,
1199 layout,
1200 padLeft,
1201 padTop,
1202 padRight,
1203 padBottom,
1204 strideX,
1205 strideY,
1206 dilationX,
1207 dilationY);
1208}
1209
// Depthwise counterpart of Convolution2d3x3Dilation3x3Test: a 10x10
// single-channel input convolved depthwise with a 3x3 kernel dilated by 3.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 1 channel, 10x10 image: a 3x3 patch of ones, all else zero.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Single 3x3 kernel with values 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1265
// Two-channel version of the depthwise dilation test. Unlike the regular
// convolution variant, the output keeps both channels ({1, 2, 4, 4}): each
// input channel is filtered independently, so both channels reproduce the
// single-channel expected values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 2 channels, 10x10; each channel is zero except a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Same 1..9 kernel repeated for each channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1341
1342
//
// Explicit template instantiations of DepthwiseConvolution2d3x3Dilation3x3Test
// and DepthwiseConvolution2d2x3x3Dilation3x3Test for the data types exercised
// by the backend unit tests. The quantised variants accumulate into Signed32
// biases.
//
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001385LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1386 armnn::IWorkloadFactory& workloadFactory,
1387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001391 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001392 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001393}
1394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001395LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1396 armnn::IWorkloadFactory& workloadFactory,
1397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1398 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001399{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001400 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1401 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001402}
1403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001404LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1405 armnn::IWorkloadFactory& workloadFactory,
1406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1407 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001408 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001410 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001411 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001412}
1413
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001414LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1415 armnn::IWorkloadFactory& workloadFactory,
1416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1417 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001418 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001419{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001420 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001421 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001422}
1423
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001424LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1425 armnn::IWorkloadFactory& workloadFactory,
1426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1427 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001428 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001429{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001430 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001431 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001432}
1433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001434LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1435 armnn::IWorkloadFactory& workloadFactory,
1436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001440 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001442}
1443
Bruno Goncalves22972f02019-04-26 21:03:24 -03001444LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1445 armnn::IWorkloadFactory& workloadFactory,
1446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1447{
1448 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001449 workloadFactory,
1450 memoryManager,
1451 0.f,
1452 0,
1453 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001454}
1455
Ruomei Yan88d44b82019-05-23 14:29:06 +01001456LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1457 armnn::IWorkloadFactory& workloadFactory,
1458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1459 bool biasEnabled,
1460 const armnn::DataLayout layout)
1461{
1462 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1463 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1464}
1465
1466LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1467 armnn::IWorkloadFactory& workloadFactory,
1468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1469 bool biasEnabled,
1470 const armnn::DataLayout layout)
1471{
1472 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1473 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1474}
1475
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001476LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001477 armnn::IWorkloadFactory& workloadFactory,
1478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1479 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001480 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001481{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001482 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1483 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001484}
1485
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001486LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 armnn::IWorkloadFactory& refWorkloadFactory,
1490 const armnn::DataLayout layout)
1491{
1492 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1493 workloadFactory, memoryManager, refWorkloadFactory, layout);
1494}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001495
1496LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1497 armnn::IWorkloadFactory& workloadFactory,
1498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001499{
1500 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1501 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001502 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001503}
1504
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001505LayerTestResult<float,4> SimpleNormalizationWithinTest(
1506 armnn::IWorkloadFactory& workloadFactory,
1507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001508{
1509 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1510 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001511 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001512}
1513
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001514LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1515 armnn::IWorkloadFactory& workloadFactory,
1516 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001517{
1518 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1519 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001520 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001521}
1522
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001523LayerTestResult<float,2> SimpleSoftmaxTest(
1524 armnn::IWorkloadFactory& workloadFactory,
1525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1526 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001527{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001528 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001529}
1530
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001531LayerTestResult<float,3> Simple3dSoftmaxTest(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1534 float beta)
1535{
1536 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1537}
1538
1539LayerTestResult<float,4> Simple4dSoftmaxTest(
1540 armnn::IWorkloadFactory& workloadFactory,
1541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1542 float beta)
1543{
1544 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1545}
1546
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001547LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1548 armnn::IWorkloadFactory& workloadFactory,
1549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1550 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001551{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001552 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001553}
1554
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001555LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1556 armnn::IWorkloadFactory& workloadFactory,
1557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1558 float beta)
1559{
1560 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1561}
1562
1563LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 float beta)
1567{
1568 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1569}
1570
nikraj01248683f2019-05-29 16:46:50 +01001571LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1574 float beta)
1575{
1576 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1577}
1578
1579LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1580 armnn::IWorkloadFactory& workloadFactory,
1581 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1582 float beta)
1583{
1584 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1585}
1586
1587LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1588 armnn::IWorkloadFactory& workloadFactory,
1589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1590 float beta)
1591{
1592 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1593}
1594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001595LayerTestResult<float,4> CompareNormalizationTest(
1596 armnn::IWorkloadFactory& workloadFactory,
1597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1598 armnn::IWorkloadFactory& refWorkloadFactory,
1599 armnn::NormalizationAlgorithmChannel normChannel,
1600 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001601{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001602 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001603}
1604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001605LayerTestResult<float,2> CompareSoftmaxTest(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001608 armnn::IWorkloadFactory& refWorkloadFactory,
1609 float beta)
1610{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001611 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1612 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001613}
1614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001615LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001618 armnn::IWorkloadFactory& refWorkloadFactory,
1619 float beta)
1620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001621 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1622 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001623}
1624
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001625std::vector<LayerTestResult<float,3>> SplitterTest(
1626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001629 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001630}
1631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001632std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1633 armnn::IWorkloadFactory& workloadFactory,
1634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001636 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001637}
1638
Ruomei Yan25339c32019-05-28 16:48:20 +01001639std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1640 armnn::IWorkloadFactory& workloadFactory,
1641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1642{
1643 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1644}
1645
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001646LayerTestResult<float, 3> CopyViaSplitterTest(
1647 armnn::IWorkloadFactory& workloadFactory,
1648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001650 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001651}
1652
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001653LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1654 armnn::IWorkloadFactory& workloadFactory,
1655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001656{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001657 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001658}
1659
Ruomei Yan25339c32019-05-28 16:48:20 +01001660LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1661 armnn::IWorkloadFactory& workloadFactory,
1662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1663{
1664 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1665}
1666
telsoa01c577f2c2018-08-31 09:22:23 +01001667LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001668 armnn::IWorkloadFactory& workloadFactory,
1669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001670{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001671 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001672 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1673 { 2., 3., 3., 4. }));
1674
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001675 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001676 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1677 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1678 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001679 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001680 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001681}
1682
1683LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01001684 armnn::IWorkloadFactory& workloadFactory,
1685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001686{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001687 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001688 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1689 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1690 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
1691
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001692 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001693 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1694 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1695 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1696 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1697 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1698 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1699 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
1700 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001701 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
1702 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001703}
1704
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001705LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1706 armnn::IWorkloadFactory& workloadFactory,
1707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001708{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001709 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001710 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1711 {2., 3., 3., 4.}));
1712
1713
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001714 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001715 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1716 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1717 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1718
Conor Kennedyb9971c92019-05-07 07:14:23 +01001719 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001720 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001721}
1722
Conor Kennedyb9971c92019-05-07 07:14:23 +01001723LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1724 armnn::IWorkloadFactory& workloadFactory,
1725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1726{
1727 const float qScale = 1.0f;
1728 const int32_t qOffset = 0;
1729
1730 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1731 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1732
1733 armnn::TensorInfo inputDesc({2, 2}, datatype);
1734 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1735 std::vector<float>{2., 3., 3., 4.}));
1736
1737 armnn::TensorInfo outputDesc({2, 4}, datatype);
1738 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1739 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1740 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1741
1742 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1743 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1744
1745}
1746
1747LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1748 armnn::IWorkloadFactory& workloadFactory,
1749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1750{
1751 const float qScale = 1.0f;
1752 const int32_t qOffset = 0;
1753
1754 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1755 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1756
1757 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
1758 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1759 std::vector<float>({ 2., 3., 3., 4. })));
1760
1761 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
1762 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1763 qOffset, std::vector<float>(
1764 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1765 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
1766
1767 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
1768 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1769}
1770
1771LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
1772 armnn::IWorkloadFactory& workloadFactory,
1773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1774{
1775 const float qScale = 2.0f;
1776 const int32_t qOffset = 0;
1777
1778 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1779 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1780
1781 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
1782 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1783 qOffset, std::vector<float>(
1784 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1785 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
1786
1787 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
1788 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1789 qOffset, std::vector<float>(
1790 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1791 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1792 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1793 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1794 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1795 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
1796
1797 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
1798 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1799}
1800
1801LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1802 armnn::IWorkloadFactory& workloadFactory,
1803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1804{
1805 const float qScale = 1.0f;
1806 const int32_t qOffset = 0;
1807
1808 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1809
1810 armnn::TensorInfo inputDesc({2, 2}, datatype);
1811 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1812 qOffset, std::vector<float>{2., 3., 3., 4.}));
1813
1814 armnn::TensorInfo outputDesc({2, 4}, datatype);
1815 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1816 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1817 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1818
1819 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1820 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1821}
1822
Jim Flynn4ed6c832019-05-20 11:02:46 +01001823LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001824 armnn::IWorkloadFactory& workloadFactory,
1825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001826{
surmeh013537c2c2018-05-18 16:31:43 +01001827 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001828 unsigned int outputHeight = 6;
1829 unsigned int outputChannels = 3;
1830
surmeh013537c2c2018-05-18 16:31:43 +01001831 unsigned int inputWidth1 = 3;
1832 unsigned int inputHeight1 = 6;
1833 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001834
surmeh013537c2c2018-05-18 16:31:43 +01001835 unsigned int inputWidth2 = 3;
1836 unsigned int inputHeight2 = 6;
1837 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001838
telsoa01c577f2c2018-08-31 09:22:23 +01001839 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001840 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1841 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1842 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001843
1844 LayerTestResult<float,3> ret(outputTensorInfo);
1845
telsoa014fcda012018-03-09 14:13:49 +00001846 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001847 {
1848 1.0f, 2.0f, 3.0f,
1849 4.0f, 5.0f, 6.0f,
1850 7.0f, 8.0f, 9.0f,
1851 10.0f, 11.0f, 12.0f,
1852 13.0f, 14.0f, 15.0f,
1853 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001854
surmeh013537c2c2018-05-18 16:31:43 +01001855 19.0f, 20.0f, 21.0f,
1856 22.0f, 23.0f, 24.0f,
1857 25.0f, 26.0f, 27.0f,
1858 28.0f, 29.0f, 30.0f,
1859 31.0f, 32.0f, 33.0f,
1860 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001861
surmeh013537c2c2018-05-18 16:31:43 +01001862 37.0f, 38.0f, 39.0f,
1863 40.0f, 41.0f, 42.0f,
1864 43.0f, 44.0f, 45.0f,
1865 46.0f, 47.0f, 48.0f,
1866 49.0f, 50.0f, 51.0f,
1867 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001868 })
1869 );
1870
telsoa014fcda012018-03-09 14:13:49 +00001871 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1872 {
surmeh013537c2c2018-05-18 16:31:43 +01001873 1.0f, 2.0f, 3.0f,
1874 4.0f, 5.0f, 6.0f,
1875 7.0f, 8.0f, 9.0f,
1876 10.0f, 11.0f, 12.0f,
1877 13.0f, 14.0f, 15.0f,
1878 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001879
surmeh013537c2c2018-05-18 16:31:43 +01001880 19.0f, 20.0f, 21.0f,
1881 22.0f, 23.0f, 24.0f,
1882 25.0f, 26.0f, 27.0f,
1883 28.0f, 29.0f, 30.0f,
1884 31.0f, 32.0f, 33.0f,
1885 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001886 })
1887 );
1888
1889 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1890 {
surmeh013537c2c2018-05-18 16:31:43 +01001891 37.0f, 38.0f, 39.0f,
1892 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001893 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001894 46.0f, 47.0f, 48.0f,
1895 49.0f, 50.0f, 51.0f,
1896 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001897 })
1898 );
1899
telsoa01c577f2c2018-08-31 09:22:23 +01001900 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01001901 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00001902
telsoa01c577f2c2018-08-31 09:22:23 +01001903 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01001904 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00001905
telsoa014fcda012018-03-09 14:13:49 +00001906 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1907
1908 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1909
1910 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1911 subTensorsSupported ?
1912 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1913 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1914
1915 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1916 subTensorsSupported ?
1917 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1918 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1919
Jim Flynne242f2d2019-05-22 14:24:13 +01001920 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00001921 armnn::WorkloadInfo info;
1922 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1923 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001924 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1925
1926 data.m_ViewOrigins.push_back(window1);
1927 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001928
Jim Flynn4ed6c832019-05-20 11:02:46 +01001929 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00001930
1931 inputHandle1->Allocate();
1932 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001933 outputHandle->Allocate();
1934
1935 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1936 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001937
Derek Lambertif30f7d32019-04-09 10:25:02 +01001938 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001939 workload->Execute();
1940
1941 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1942
1943 return ret;
1944}
1945
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001946LayerTestResult<float,4> AdditionTest(
1947 armnn::IWorkloadFactory& workloadFactory,
1948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001949{
1950 unsigned int batchSize = 2;
1951 unsigned int channels = 2;
1952 unsigned int height = 2;
1953 unsigned int width = 3;
1954
1955 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1956 armnn::TensorInfo outputTensorInfo;
1957
1958 unsigned int shape[] = {batchSize, channels, height, width};
1959
1960 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1961 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1962 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1963
1964
1965 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1966 {
1967 0.0f, 2.0f, 1.0f,
1968 0.2f, 1.0f, 2.0f,
1969
1970 1.0f, 2.0f, 1.0f,
1971 0.2f, 1.0f, 2.0f,
1972
1973 0.0f, 2.0f, 1.0f,
1974 4.2f, 1.0f, 2.0f,
1975
1976 0.0f, 0.0f, 1.0f,
1977 0.2f, 1.0f, 2.0f,
1978 }));
1979
1980 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1981 {
1982 1.0f, 2.0f, 1.0f,
1983 0.0f, 1.0f, 2.0f,
1984
1985 1.0f, 2.0f, -2.0f,
1986 0.2f, 1.0f, 2.0f,
1987
1988 0.0f, 2.0f, 1.0f,
1989 4.2f, 0.0f, -3.0f,
1990
1991 0.0f, 0.0f, 1.0f,
1992 0.7f, 1.0f, 5.0f,
1993 }));
1994
1995 LayerTestResult<float,4> ret(outputTensorInfo);
1996 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1997 {
1998 1.0f, 4.0f, 2.0f,
1999 0.2f, 2.0f, 4.0f,
2000
2001 2.0f, 4.0f, -1.0f,
2002 0.4f, 2.0f, 4.0f,
2003
2004 0.0f, 4.0f, 2.0f,
2005 8.4f, 1.0f, -1.0f,
2006
2007 0.0f, 0.0f, 2.0f,
2008 0.9f, 2.0f, 7.0f,
2009 }));
2010
2011 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2012 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2013 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2014
2015 armnn::AdditionQueueDescriptor data;
2016 armnn::WorkloadInfo info;
2017 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2018 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2019 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2020
2021 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2022
2023 inputHandle1->Allocate();
2024 inputHandle2->Allocate();
2025 outputHandle->Allocate();
2026
2027 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2028 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2029
Derek Lambertif30f7d32019-04-09 10:25:02 +01002030 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002031 workload->Execute();
2032
2033 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2034
2035 return ret;
2036}
2037
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002038template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 float qScale,
2043 int32_t qOffset)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2046 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2047 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002048
2049 if (armnn::IsQuantizedType<T>())
2050 {
2051 inputTensorInfo1.SetQuantizationScale(qScale);
2052 inputTensorInfo1.SetQuantizationOffset(qOffset);
2053 inputTensorInfo2.SetQuantizationScale(qScale);
2054 inputTensorInfo2.SetQuantizationOffset(qOffset);
2055 outputTensorInfo.SetQuantizationScale(qScale);
2056 outputTensorInfo.SetQuantizationOffset(qOffset);
2057 }
2058
2059 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2060 {
2061 0.0f,
2062 1.0f,
2063
2064 2.0f,
2065 3.0f,
2066
2067 4.0f,
2068 5.0f,
2069 }));
2070
2071 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2072 {
2073 0.5f, 1.5f, 2.5f,
2074 3.5f, 4.5f, 5.5f,
2075 }));
2076
2077 LayerTestResult<T,4> ret(outputTensorInfo);
2078 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2079 {
2080 0.5f, 1.5f, 2.5f,
2081 4.5f, 5.5f, 6.5f,
2082
2083 2.5f, 3.5f, 4.5f,
2084 6.5f, 7.5f, 8.5f,
2085
2086 4.5f, 5.5f, 6.5f,
2087 8.5f, 9.5f, 10.5f,
2088 }));
2089
2090 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2091 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2092 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2093
2094 armnn::AdditionQueueDescriptor data;
2095 armnn::WorkloadInfo info;
2096 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2097 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2098 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2099
2100 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2101
2102 inputHandle1->Allocate();
2103 inputHandle2->Allocate();
2104 outputHandle->Allocate();
2105
2106 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2107 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2108
Derek Lambertif30f7d32019-04-09 10:25:02 +01002109 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002110 workload->Execute();
2111
2112 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2113
2114 return ret;
2115}
2116
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002117template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002118LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2119 armnn::IWorkloadFactory& workloadFactory,
2120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002121 float qScale,
2122 int32_t qOffset)
2123{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002124 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2125 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2126 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002127
2128 if (armnn::IsQuantizedType<T>())
2129 {
2130 inputTensorInfo1.SetQuantizationScale(qScale);
2131 inputTensorInfo1.SetQuantizationOffset(qOffset);
2132 inputTensorInfo2.SetQuantizationScale(qScale);
2133 inputTensorInfo2.SetQuantizationOffset(qOffset);
2134 outputTensorInfo.SetQuantizationScale(qScale);
2135 outputTensorInfo.SetQuantizationOffset(qOffset);
2136 }
2137
2138 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2139 {
2140 0.0f, 1.0f, 2.0f,
2141 3.0f, 4.0f, 5.0f,
2142 6.0f, 7.0f, 8.0f,
2143 9.0f, 10.0f, 11.0f,
2144 12.0f, 13.0f, 14.0f,
2145 15.0f, 16.0f, 17.0f,
2146 }));
2147
2148 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2149 {
2150 0.5f,
2151 }));
2152
2153 LayerTestResult<T,4> ret(outputTensorInfo);
2154 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2155 {
2156 0.5f, 1.5f, 2.5f,
2157 3.5f, 4.5f, 5.5f,
2158 6.5f, 7.5f, 8.5f,
2159 9.5f, 10.5f, 11.5f,
2160 12.5f, 13.5f, 14.5f,
2161 15.5f, 16.5f, 17.5f,
2162 }));
2163
2164 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2166 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2167
2168 armnn::AdditionQueueDescriptor data;
2169 armnn::WorkloadInfo info;
2170 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2171 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2172 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2173
2174 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2175
2176 inputHandle1->Allocate();
2177 inputHandle2->Allocate();
2178 outputHandle->Allocate();
2179
2180 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2181 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2182
Derek Lambertif30f7d32019-04-09 10:25:02 +01002183 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002184 workload->Execute();
2185
2186 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2187
2188 return ret;
2189}
2190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002191LayerTestResult<float, 4> AdditionBroadcastTest(
2192 armnn::IWorkloadFactory& workloadFactory,
2193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002194{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002195 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2196 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002197}
2198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002199LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2200 armnn::IWorkloadFactory& workloadFactory,
2201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002202{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002203 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2204 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002205}
2206
Sadik Armagan2999a022019-04-09 14:20:12 +01002207LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2208 armnn::IWorkloadFactory& workloadFactory,
2209 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2210{
2211 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2212 workloadFactory, memoryManager, 2.f, 0);
2213}
2214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002215LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2216 armnn::IWorkloadFactory& workloadFactory,
2217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002218{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002219 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2220 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002221}
2222
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002223LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2224 armnn::IWorkloadFactory& workloadFactory,
2225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002226{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002227 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2228 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002229}
2230
Sadik Armagan2999a022019-04-09 14:20:12 +01002231LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2232 armnn::IWorkloadFactory& workloadFactory,
2233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2234{
2235 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2236 workloadFactory, memoryManager, 0.1333333f, 0);
2237}
2238
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002239LayerTestResult<float,4> CompareAdditionTest(
2240 armnn::IWorkloadFactory& workloadFactory,
2241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2242 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002243{
2244 unsigned int batchSize = 4;
2245 unsigned int channels = 1;
2246 unsigned int height = 2;
2247 unsigned int width = 3;
2248
2249 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2250 armnn::TensorInfo outputTensorInfo;
2251
2252 unsigned int shape[] = {batchSize, channels, height, width};
2253
2254 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2255 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2256 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2257
2258 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2259 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2260
2261 LayerTestResult<float,4> ret(outputTensorInfo);
2262
2263 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2264 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2265 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2266
2267 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2268 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2269 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2270
2271 armnn::AdditionQueueDescriptor data;
2272 armnn::WorkloadInfo info;
2273 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2274 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2275 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2276
2277 armnn::AdditionQueueDescriptor refData = data;
2278 armnn::WorkloadInfo refInfo = info;
2279 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2280 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2281 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2282
2283 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2284 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2285
2286 inputHandle1->Allocate();
2287 inputHandle2->Allocate();
2288 outputHandle->Allocate();
2289 inputHandle1Ref->Allocate();
2290 inputHandle2Ref->Allocate();
2291 outputHandleRef->Allocate();
2292
2293 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2294 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2295 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2296 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2297
Derek Lambertif30f7d32019-04-09 10:25:02 +01002298 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002299 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002300 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002301 workloadRef->Execute();
2302
2303 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2304 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2305
2306 return ret;
2307}
2308
surmeh01bceff2f2018-03-29 16:29:27 +01002309namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002310template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002311LayerTestResult<T, 4> DivisionTestHelper(
2312 armnn::IWorkloadFactory& workloadFactory,
2313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2314 const unsigned int shape0[4],
2315 const std::vector<T>& values0,
2316 float scale0,
2317 int32_t offset0,
2318 const unsigned int shape1[4],
2319 const std::vector<T> & values1,
2320 float scale1,
2321 int32_t offset1,
2322 const unsigned int outShape[4],
2323 const std::vector<T> & outValues,
2324 float outScale,
2325 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002326{
Sadik Armagan2999a022019-04-09 14:20:12 +01002327 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2328 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2329 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002330
David Beck5cd01f32018-09-12 16:00:08 +01002331 inputTensorInfo0.SetQuantizationScale(scale0);
2332 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002333
David Beck5cd01f32018-09-12 16:00:08 +01002334 inputTensorInfo1.SetQuantizationScale(scale1);
2335 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002336
David Beck5cd01f32018-09-12 16:00:08 +01002337 outputTensorInfo.SetQuantizationScale(outScale);
2338 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002339
David Beck5cd01f32018-09-12 16:00:08 +01002340 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2341 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002342
David Beck5cd01f32018-09-12 16:00:08 +01002343 LayerTestResult<T, 4> result(outputTensorInfo);
2344 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002345
David Beck5cd01f32018-09-12 16:00:08 +01002346 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2347 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2348 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002349
David Beck5cd01f32018-09-12 16:00:08 +01002350 armnn::DivisionQueueDescriptor data;
2351 armnn::WorkloadInfo info;
2352 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2353 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2354 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002355
David Beck5cd01f32018-09-12 16:00:08 +01002356 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002357
David Beck5cd01f32018-09-12 16:00:08 +01002358 inputHandle0->Allocate();
2359 inputHandle1->Allocate();
2360 outputHandle->Allocate();
2361
2362 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2363 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2364
Derek Lambertif30f7d32019-04-09 10:25:02 +01002365 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002366 workload->Execute();
2367
2368 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2369
2370 return result;
2371}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002372} // anonymous namespace
2373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002374LayerTestResult<float,4> DivisionByZeroTest(
2375 armnn::IWorkloadFactory& workloadFactory,
2376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002377{
2378 const unsigned int width = 2;
2379 const unsigned int height = 2;
2380 const unsigned int channelCount = 2;
2381 const unsigned int batchSize = 2;
2382
2383 unsigned int shape[] = { batchSize, channelCount, height, width };
2384
2385 std::vector<float> input0({
2386 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2387 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2388
2389 std::vector<float> input1({
2390 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2391 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2392
2393 std::vector<float> output({
2394 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2395 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2396
Sadik Armagan2999a022019-04-09 14:20:12 +01002397 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2398 memoryManager,
2399 shape, input0, 1.0f, 0,
2400 shape, input1, 1.0f, 0,
2401 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002402}
2403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002404LayerTestResult<float,4> DivisionTest(
2405 armnn::IWorkloadFactory& workloadFactory,
2406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002407{
2408 const unsigned int width = 2;
2409 const unsigned int height = 2;
2410 const unsigned int channelCount = 2;
2411 const unsigned int batchSize = 2;
2412
2413 unsigned int shape[] = { batchSize, channelCount, height, width };
2414
2415 std::vector<float> input0({
2416 2, 2, 2, 2, 3, 3, 3, 3,
2417 4, 4, 4, 4, 5, 5, 5, 5 });
2418
2419 std::vector<float> input1({
2420 1, 1, 1, 1, 2, 2, 2, 2,
2421 4, 4, 4, 4, 4, 4, 4, 4 });
2422
2423 std::vector<float> output({
2424 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2425 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2426
David Beck5cd01f32018-09-12 16:00:08 +01002427
Sadik Armagan2999a022019-04-09 14:20:12 +01002428 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2429 memoryManager,
2430 shape, input0, 1.0f, 0,
2431 shape, input1, 1.0f, 0,
2432 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002433}
2434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002435LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2436 armnn::IWorkloadFactory& workloadFactory,
2437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002438{
2439 unsigned int shape0[] = { 1, 2, 2, 2 };
2440 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2441
2442 unsigned int shape1[] = { 1, 1, 1, 1 };
2443 std::vector<float> input1({ 2 });
2444
2445 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2446
David Beck5cd01f32018-09-12 16:00:08 +01002447
Sadik Armagan2999a022019-04-09 14:20:12 +01002448 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2449 memoryManager,
2450 shape0, input0, 1.0f, 0,
2451 shape1, input1, 1.0f, 0,
2452 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002453}
2454
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002455LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2456 armnn::IWorkloadFactory& workloadFactory,
2457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002458{
2459 unsigned int shape0[] = { 1, 3, 3, 2 };
2460 std::vector<float> input0({
2461 1, 4, 3, 8, 5, 12,
2462 7, 16, 9, 20, 11, 24,
2463 13, 28, 15, 32, 17, 36});
2464
2465 unsigned int shape1[] = { 1, 1, 1, 2 };
2466 std::vector<float> input1({ 1, 2 });
2467
2468 std::vector<float> output({
2469 1, 2, 3, 4, 5, 6,
2470 7, 8, 9, 10, 11, 12,
2471 13, 14, 15, 16, 17, 18});
2472
Sadik Armagan2999a022019-04-09 14:20:12 +01002473 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2474 memoryManager,
2475 shape0, input0, 1.0f, 0,
2476 shape1, input1, 1.0f, 0,
2477 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002478}
2479
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002480LayerTestResult<uint8_t,4> DivisionUint8Test(
2481 armnn::IWorkloadFactory& workloadFactory,
2482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002483{
2484 const unsigned int width = 2;
2485 const unsigned int height = 2;
2486 const unsigned int channelCount = 2;
2487 const unsigned int batchSize = 2;
2488
2489 unsigned int shape[] = { batchSize, channelCount, height, width };
2490
2491 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2492 4, 4, 4, 4, 5, 5, 5, 5 });
2493
2494 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2495 4, 4, 4, 4, 4, 4, 4, 4 });
2496
2497 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2498 4, 4, 4, 4, 5, 5, 5, 5});
2499
2500
Sadik Armagan2999a022019-04-09 14:20:12 +01002501 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2502 memoryManager,
2503 shape, input0, 1.0f, 0,
2504 shape, input1, 1.0f, 0,
2505 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002506}
2507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002508LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2509 armnn::IWorkloadFactory& workloadFactory,
2510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002511{
2512 unsigned int shape0[] = { 1, 2, 2, 2 };
2513 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2514
2515 unsigned int shape1[] = { 1, 1, 1, 1 };
2516 std::vector<uint8_t> input1({ 2 });
2517
2518 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2519
Sadik Armagan2999a022019-04-09 14:20:12 +01002520 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2521 memoryManager,
2522 shape0, input0, 1.0f, 0,
2523 shape1, input1, 1.0f, 0,
2524 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002525}
2526
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002527LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2528 armnn::IWorkloadFactory& workloadFactory,
2529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002530{
2531 unsigned int shape0[] = { 1, 3, 3, 2 };
2532 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2533 7, 16, 9, 20, 11, 24,
2534 13, 28, 15, 32, 17, 36});
2535
2536 unsigned int shape1[] = { 1, 1, 1, 2 };
2537 std::vector<uint8_t> input1({ 1, 2 });
2538
2539 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2540 7, 8, 9, 10, 11, 12,
2541 13, 14, 15, 16, 17, 18});
2542
Sadik Armagan2999a022019-04-09 14:20:12 +01002543 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2544 memoryManager,
2545 shape0, input0, 1.0f, 0,
2546 shape1, input1, 1.0f, 0,
2547 shape0, output, 1.0f, 0);
2548}
2549
2550LayerTestResult<int16_t,4> DivisionInt16Test(
2551 armnn::IWorkloadFactory& workloadFactory,
2552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2553{
2554 unsigned int shape[] = { 2, 2, 2, 2 };
2555
2556 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2557 4, 4, 4, 4, 5, 5, 5, 5 });
2558
2559 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2560 4, 4, 4, 4, 4, 4, 4, 4 });
2561
2562 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2563 4, 4, 4, 4, 5, 5, 5, 5});
2564
2565
2566 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2567 memoryManager,
2568 shape, input0, 1.0f, 0,
2569 shape, input1, 1.0f, 0,
2570 shape, output, 0.25f, 0);
2571}
2572
2573LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2574 armnn::IWorkloadFactory& workloadFactory,
2575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2576{
2577 unsigned int shape0[] = { 1, 2, 2, 2 };
2578 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2579
2580 unsigned int shape1[] = { 1, 1, 1, 1 };
2581 std::vector<int16_t> input1({ 2 });
2582
2583 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2584
2585 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2586 memoryManager,
2587 shape0, input0, 1.0f, 0,
2588 shape1, input1, 1.0f, 0,
2589 shape0, output, 1.0f, 0);
2590}
2591
2592LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2593 armnn::IWorkloadFactory& workloadFactory,
2594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2595{
2596 unsigned int shape0[] = { 1, 3, 3, 2 };
2597 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2598 7, 16, 9, 20, 11, 24,
2599 13, 28, 15, 32, 17, 36});
2600
2601 unsigned int shape1[] = { 1, 1, 1, 2 };
2602 std::vector<int16_t> input1({ 1, 2 });
2603
2604 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2605 7, 8, 9, 10, 11, 12,
2606 13, 14, 15, 16, 17, 18});
2607
2608 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2609 memoryManager,
2610 shape0, input0, 1.0f, 0,
2611 shape1, input1, 1.0f, 0,
2612 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002613}
2614
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002615template<typename DescriptorType>
2616std::unique_ptr<armnn::IWorkload> CreateWorkload(
2617 const armnn::IWorkloadFactory& workloadFactory,
2618 const armnn::WorkloadInfo& info,
2619 const DescriptorType& descriptor)
2620{
2621 return CreateWorkload(workloadFactory, info, descriptor);
2622};
2623
2624template<>
2625std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2626 const armnn::IWorkloadFactory& workloadFactory,
2627 const armnn::WorkloadInfo& info,
2628 const armnn::MaximumQueueDescriptor& descriptor)
2629{
2630 return workloadFactory.CreateMaximum(descriptor, info);
2631}
2632
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002633template<>
2634std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2635 const armnn::IWorkloadFactory& workloadFactory,
2636 const armnn::WorkloadInfo& info,
2637 const armnn::MinimumQueueDescriptor& descriptor)
2638{
2639 return workloadFactory.CreateMinimum(descriptor, info);
2640}
2641
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002642template<>
2643std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2644 const armnn::IWorkloadFactory& workloadFactory,
2645 const armnn::WorkloadInfo& info,
2646 const armnn::EqualQueueDescriptor& descriptor)
2647{
2648 return workloadFactory.CreateEqual(descriptor, info);
2649}
2650
FrancisMurtagh878f0232018-12-19 10:56:15 +00002651template<>
2652std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2653 const armnn::IWorkloadFactory& workloadFactory,
2654 const armnn::WorkloadInfo& info,
2655 const armnn::GreaterQueueDescriptor& descriptor)
2656{
2657 return workloadFactory.CreateGreater(descriptor, info);
2658}
2659
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002660namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002661
2662template <typename Descriptor,
2663 armnn::DataType ArmnnTypeInput,
2664 armnn::DataType ArmnnTypeOutput,
2665 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2666 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2667LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2668 armnn::IWorkloadFactory & workloadFactory,
2669 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2670 const unsigned int shape0[4], std::vector<TInput> values0,
2671 const unsigned int shape1[4], std::vector<TInput> values1,
2672 const unsigned int outShape[4], std::vector<TOutput> outValues,
2673 float qScale = 0.0f, int qOffset = 0)
2674{
2675 const size_t dimensionCount = 4;
2676 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2677 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2678 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2679
2680 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2681 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2682
2683 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002684 {
kevmay012b4d88e2019-01-24 14:05:09 +00002685 inputTensorInfo0.SetQuantizationScale(qScale);
2686 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002687
kevmay012b4d88e2019-01-24 14:05:09 +00002688 inputTensorInfo1.SetQuantizationScale(qScale);
2689 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002690
kevmay012b4d88e2019-01-24 14:05:09 +00002691 outputTensorInfo.SetQuantizationScale(qScale);
2692 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002693 }
kevmay012b4d88e2019-01-24 14:05:09 +00002694
2695 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2696
2697 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2698 {
2699 ret.compareBoolean = true;
2700 }
2701
2702 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2703 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2704 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2705
2706 Descriptor data;
2707 armnn::WorkloadInfo info;
2708 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2709 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2710 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2711 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2712
2713 inputHandle0->Allocate();
2714 inputHandle1->Allocate();
2715 outputHandle->Allocate();
2716
2717 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2718 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2719
Derek Lambertif30f7d32019-04-09 10:25:02 +01002720 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002721 ExecuteWorkload(*workload, memoryManager);
2722
2723 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2724
2725 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2726 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002727}
2728
kevmay012b4d88e2019-01-24 14:05:09 +00002729template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2730LayerTestResult<T, 4> ElementwiseTestHelper(
2731 armnn::IWorkloadFactory & workloadFactory,
2732 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2733 const unsigned int shape0[4], std::vector<T> values0,
2734 const unsigned int shape1[4], std::vector<T> values1,
2735 const unsigned int outShape[4], std::vector<T> outValues,
2736 float qScale = 0.0f, int qOffset = 0)
2737{
2738 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2739 (workloadFactory,
2740 memoryManager,
2741 shape0,
2742 values0,
2743 shape1,
2744 values1,
2745 outShape,
2746 outValues,
2747 qScale,
2748 qOffset);
2749}
2750}
2751
2752LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002754{
2755 const unsigned int width = 2;
2756 const unsigned int height = 2;
2757 const unsigned int channelCount = 2;
2758 const unsigned int batchSize = 2;
2759
2760 unsigned int shape[] = { batchSize, channelCount, height, width };
2761
2762 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2763 3, 3, 3, 3, 4, 4, 4, 4 });
2764
2765 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2766 5, 5, 5, 5, 4, 4, 4, 4 });
2767
kevmay012b4d88e2019-01-24 14:05:09 +00002768 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2769 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002770
kevmay012b4d88e2019-01-24 14:05:09 +00002771 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002772 workloadFactory,
2773 memoryManager,
2774 shape,
2775 input0,
2776 shape,
2777 input1,
2778 shape,
2779 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002780}
2781
kevmay012b4d88e2019-01-24 14:05:09 +00002782LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002783 armnn::IWorkloadFactory& workloadFactory,
2784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2785{
2786 unsigned int shape0[] = { 1, 2, 2, 2 };
2787 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2788
2789 unsigned int shape1[] = { 1, 1, 1, 1 };
2790 std::vector<float> input1({ 1 });
2791
kevmay012b4d88e2019-01-24 14:05:09 +00002792 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002793
kevmay012b4d88e2019-01-24 14:05:09 +00002794 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002795 workloadFactory,
2796 memoryManager,
2797 shape0,
2798 input0,
2799 shape1,
2800 input1,
2801 shape0,
2802 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002803}
2804
kevmay012b4d88e2019-01-24 14:05:09 +00002805LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002806 armnn::IWorkloadFactory& workloadFactory,
2807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2808{
2809 const unsigned int shape0[] = { 1, 2, 2, 3 };
2810 const unsigned int shape1[] = { 1, 1, 1, 3 };
2811
2812 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2813 7, 8, 9, 10, 11, 12 });
2814
2815 std::vector<float> input1({ 1, 2, 3});
2816
kevmay012b4d88e2019-01-24 14:05:09 +00002817 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2818 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002819
kevmay012b4d88e2019-01-24 14:05:09 +00002820 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002821 workloadFactory,
2822 memoryManager,
2823 shape0,
2824 input0,
2825 shape1,
2826 input1,
2827 shape0,
2828 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002829}
2830
2831LayerTestResult<uint8_t, 4> EqualUint8Test(
2832 armnn::IWorkloadFactory& workloadFactory,
2833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2834{
2835 unsigned int shape[] = { 2, 2, 2, 2 };
2836
2837 // See dequantized values to the right.
2838 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002839 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002840
2841 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2842 3, 3, 3, 3, 5, 5, 5, 5 });
2843
2844 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2845 1, 1, 1, 1, 0, 0, 0, 0 });
2846
kevmay012b4d88e2019-01-24 14:05:09 +00002847 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2848 armnn::DataType::QuantisedAsymm8,
2849 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002850 workloadFactory,
2851 memoryManager,
2852 shape,
2853 input0,
2854 shape,
2855 input1,
2856 shape,
2857 output,
2858 1.0f,
2859 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002860}
2861
2862LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2863 armnn::IWorkloadFactory& workloadFactory,
2864 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2865{
2866 const unsigned int shape0[] = { 1, 2, 2, 3 };
2867 const unsigned int shape1[] = { 1, 1, 1, 1 };
2868
2869 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2870 7, 8, 9, 10, 11, 12 });
2871
2872 std::vector<uint8_t> input1({ 1 });
2873
2874 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2875 0, 0, 0, 0, 0, 0 });
2876
kevmay012b4d88e2019-01-24 14:05:09 +00002877 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2878 armnn::DataType::QuantisedAsymm8,
2879 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002880 workloadFactory,
2881 memoryManager,
2882 shape0,
2883 input0,
2884 shape1,
2885 input1,
2886 shape0,
2887 output,
2888 1.0f,
2889 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002890}
2891
2892LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2893 armnn::IWorkloadFactory& workloadFactory,
2894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2895{
2896 const unsigned int shape0[] = { 1, 2, 2, 3 };
2897 const unsigned int shape1[] = { 1, 1, 1, 3 };
2898
2899 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2900 7, 8, 9, 10, 11, 12 });
2901
2902 std::vector<uint8_t> input1({ 1, 1, 3});
2903
2904 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2905 0, 0, 0, 0, 0, 0 });
2906
kevmay012b4d88e2019-01-24 14:05:09 +00002907 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2908 armnn::DataType::QuantisedAsymm8,
2909 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002910 workloadFactory,
2911 memoryManager,
2912 shape0,
2913 input0,
2914 shape1,
2915 input1,
2916 shape0,
2917 output,
2918 1.0f,
2919 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002920}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002921
kevmay012b4d88e2019-01-24 14:05:09 +00002922LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2924{
2925 const unsigned int width = 2;
2926 const unsigned int height = 2;
2927 const unsigned int channelCount = 2;
2928 const unsigned int batchSize = 2;
2929
2930 unsigned int shape[] = { batchSize, channelCount, height, width };
2931
2932 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2933 3, 3, 3, 3, 4, 4, 4, 4 });
2934
2935 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2936 5, 5, 5, 5, 4, 4, 4, 4 });
2937
kevmay012b4d88e2019-01-24 14:05:09 +00002938 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2939 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002940
kevmay012b4d88e2019-01-24 14:05:09 +00002941 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002942 workloadFactory,
2943 memoryManager,
2944 shape,
2945 input0,
2946 shape,
2947 input1,
2948 shape,
2949 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002950}
2951
kevmay012b4d88e2019-01-24 14:05:09 +00002952LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002953 armnn::IWorkloadFactory& workloadFactory,
2954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2955{
2956 unsigned int shape0[] = { 1, 2, 2, 2 };
2957 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2958
2959 unsigned int shape1[] = { 1, 1, 1, 1 };
2960 std::vector<float> input1({ 1 });
2961
kevmay012b4d88e2019-01-24 14:05:09 +00002962 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002963
kevmay012b4d88e2019-01-24 14:05:09 +00002964 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002965 workloadFactory,
2966 memoryManager,
2967 shape0,
2968 input0,
2969 shape1,
2970 input1,
2971 shape0,
2972 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002973}
2974
kevmay012b4d88e2019-01-24 14:05:09 +00002975LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002976 armnn::IWorkloadFactory& workloadFactory,
2977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2978{
2979 const unsigned int shape0[] = { 1, 2, 2, 3 };
2980 const unsigned int shape1[] = { 1, 1, 1, 3 };
2981
2982 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2983 7, 8, 9, 10, 11, 12 });
2984
2985 std::vector<float> input1({ 1, 3, 2});
2986
kevmay012b4d88e2019-01-24 14:05:09 +00002987 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2988 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002989
kevmay012b4d88e2019-01-24 14:05:09 +00002990 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002991 workloadFactory,
2992 memoryManager,
2993 shape0,
2994 input0,
2995 shape1,
2996 input1,
2997 shape0,
2998 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002999}
3000
3001LayerTestResult<uint8_t, 4> GreaterUint8Test(
3002 armnn::IWorkloadFactory& workloadFactory,
3003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3004{
3005 unsigned int shape[] = { 2, 2, 2, 2 };
3006
3007 // See dequantized values to the right.
3008 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3009 3, 3, 3, 3, 5, 5, 5, 5 });
3010
3011 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3012 2, 2, 2, 2, 5, 5, 5, 5 });
3013
3014 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3015 1, 1, 1, 1, 0, 0, 0, 0 });
3016
kevmay012b4d88e2019-01-24 14:05:09 +00003017 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3018 armnn::DataType::QuantisedAsymm8,
3019 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003020 workloadFactory,
3021 memoryManager,
3022 shape,
3023 input0,
3024 shape,
3025 input1,
3026 shape,
3027 output,
3028 1.0f,
3029 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003030}
3031
3032LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3033 armnn::IWorkloadFactory& workloadFactory,
3034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3035{
3036 const unsigned int shape0[] = { 1, 2, 2, 3 };
3037 const unsigned int shape1[] = { 1, 1, 1, 1 };
3038
3039 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3040 7, 8, 9, 10, 11, 12 });
3041
3042 std::vector<uint8_t> input1({ 1 });
3043
3044 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3045 1, 1, 1, 1, 1, 1 });
3046
kevmay012b4d88e2019-01-24 14:05:09 +00003047 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3048 armnn::DataType::QuantisedAsymm8,
3049 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003050 workloadFactory,
3051 memoryManager,
3052 shape0,
3053 input0,
3054 shape1,
3055 input1,
3056 shape0,
3057 output,
3058 1.0f,
3059 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003060}
3061
3062LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3063 armnn::IWorkloadFactory& workloadFactory,
3064 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3065{
3066 const unsigned int shape0[] = { 1, 2, 2, 3 };
3067 const unsigned int shape1[] = { 1, 1, 1, 3 };
3068
3069 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3070 7, 8, 9, 10, 11, 12 });
3071
3072 std::vector<uint8_t> input1({ 1, 1, 3});
3073
3074 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3075 1, 1, 1, 1, 1, 1 });
3076
kevmay012b4d88e2019-01-24 14:05:09 +00003077 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3078 armnn::DataType::QuantisedAsymm8,
3079 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003080 workloadFactory,
3081 memoryManager,
3082 shape0,
3083 input0,
3084 shape1,
3085 input1,
3086 shape0,
3087 output,
3088 1.0f,
3089 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003090}
3091
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003092LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3094{
3095 const unsigned int width = 2;
3096 const unsigned int height = 2;
3097 const unsigned int channelCount = 2;
3098 const unsigned int batchSize = 2;
3099
3100 unsigned int shape[] = { batchSize, channelCount, height, width };
3101
3102 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3103 3, 3, 3, 3, 4, 4, 4, 4 });
3104
3105 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3106 4, 4, 4, 4, 5, 5, 5, 5 });
3107
3108 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3109 4, 4, 4, 4, 5, 5, 5, 5 });
3110
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003111 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3112 workloadFactory,
3113 memoryManager,
3114 shape,
3115 input0,
3116 shape,
3117 input1,
3118 shape,
3119 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003120}
3121
3122LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3123 armnn::IWorkloadFactory& workloadFactory,
3124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3125{
3126 unsigned int shape0[] = { 1, 2, 2, 2 };
3127 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3128
3129 unsigned int shape1[] = { 1, 1, 1, 1 };
3130 std::vector<float> input1({ 2 });
3131
3132 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3133
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003134 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3135 workloadFactory,
3136 memoryManager,
3137 shape0,
3138 input0,
3139 shape1,
3140 input1,
3141 shape0,
3142 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003143}
3144
3145LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3146 armnn::IWorkloadFactory& workloadFactory,
3147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3148{
3149 const unsigned int shape0[] = { 1, 2, 2, 3 };
3150 const unsigned int shape1[] = { 1, 1, 1, 3 };
3151
3152 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3153 7, 8, 9, 10, 11, 12 });
3154
3155 std::vector<float> input1({ 1, 2, 3});
3156
3157 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003158 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003159
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003160 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3161 workloadFactory,
3162 memoryManager,
3163 shape0,
3164 input0,
3165 shape1,
3166 input1,
3167 shape0,
3168 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003169}
3170
3171LayerTestResult<uint8_t, 4> MaximumUint8Test(
3172 armnn::IWorkloadFactory& workloadFactory,
3173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3174{
3175 unsigned int shape[] = { 2, 2, 2, 2 };
3176
3177 // See dequantized values to the right.
3178 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3179 3, 3, 3, 3, 4, 4, 4, 4 });
3180
3181 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3182 4, 4, 4, 4, 5, 5, 5, 5 });
3183
3184 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3185 4, 4, 4, 4, 5, 5, 5, 5 });
3186
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003187 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3188 workloadFactory,
3189 memoryManager,
3190 shape,
3191 input0,
3192 shape,
3193 input1,
3194 shape,
3195 output,
3196 1.0f,
3197 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003198}
3199
3200LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3201 armnn::IWorkloadFactory& workloadFactory,
3202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3203{
3204 const unsigned int shape0[] = { 1, 2, 2, 3 };
3205 const unsigned int shape1[] = { 1, 1, 1, 1 };
3206
3207 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3208 7, 8, 9, 10, 11, 12 });
3209
3210 std::vector<uint8_t> input1({2});
3211
3212 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3213 7, 8, 9, 10, 11, 12 });
3214
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003215 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3216 workloadFactory,
3217 memoryManager,
3218 shape0,
3219 input0,
3220 shape1,
3221 input1,
3222 shape0,
3223 output,
3224 1.0f,
3225 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003226}
3227
3228LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3229 armnn::IWorkloadFactory& workloadFactory,
3230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3231{
3232 const unsigned int shape0[] = { 1, 2, 2, 3 };
3233 const unsigned int shape1[] = { 1, 1, 1, 3 };
3234
3235 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3236 7, 8, 9, 10, 11, 12 });
3237
3238 std::vector<uint8_t> input1({ 1, 10, 3});
3239
3240 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3241 7, 10, 9, 10, 11, 12 });
3242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003243 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3244 workloadFactory,
3245 memoryManager,
3246 shape0,
3247 input0,
3248 shape1,
3249 input1,
3250 shape0,
3251 output,
3252 1.0f,
3253 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003254}
3255
Sadik Armagan2999a022019-04-09 14:20:12 +01003256LayerTestResult<int16_t, 4> MaximumInt16Test(
3257 armnn::IWorkloadFactory& workloadFactory,
3258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3259{
3260 unsigned int shape[] = { 2, 2, 2, 2 };
3261
3262 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3263 3, 3, 3, 3, 4, 4, 4, 4 });
3264
3265 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3266 4, 4, 4, 4, 5, 5, 5, 5 });
3267
3268 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3269 4, 4, 4, 4, 5, 5, 5, 5 });
3270
3271 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3272 workloadFactory,
3273 memoryManager,
3274 shape,
3275 input0,
3276 shape,
3277 input1,
3278 shape,
3279 output,
3280 1.0f,
3281 0);
3282}
3283
3284LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3285 armnn::IWorkloadFactory& workloadFactory,
3286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3287{
3288 const unsigned int shape0[] = { 1, 2, 2, 3 };
3289 const unsigned int shape1[] = { 1, 1, 1, 1 };
3290
3291 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3292 7, 8, 9, 10, 11, 12 });
3293
3294 std::vector<int16_t> input1({2});
3295
3296 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3297 7, 8, 9, 10, 11, 12 });
3298
3299 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3300 workloadFactory,
3301 memoryManager,
3302 shape0,
3303 input0,
3304 shape1,
3305 input1,
3306 shape0,
3307 output,
3308 1.0f,
3309 0);
3310}
3311
3312LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3313 armnn::IWorkloadFactory& workloadFactory,
3314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3315{
3316 const unsigned int shape0[] = { 1, 2, 2, 3 };
3317 const unsigned int shape1[] = { 1, 1, 1, 3 };
3318
3319 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3320 7, 8, 9, 10, 11, 12 });
3321
3322 std::vector<int16_t> input1({ 1, 10, 3});
3323
3324 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3325 7, 10, 9, 10, 11, 12 });
3326
3327 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3328 workloadFactory,
3329 memoryManager,
3330 shape0,
3331 input0,
3332 shape1,
3333 input1,
3334 shape0,
3335 output,
3336 1.0f,
3337 0);
3338}
3339
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003340LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3341 armnn::IWorkloadFactory& workloadFactory,
3342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3343{
3344 unsigned int shape0[] = { 1, 2, 2, 2 };
3345 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3346
3347 unsigned int shape1[] = { 1, 1, 1, 1 };
3348 std::vector<float> input1({ 2 });
3349
3350 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3351
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003352 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3353 workloadFactory,
3354 memoryManager,
3355 shape0,
3356 input0,
3357 shape1,
3358 input1,
3359 shape0,
3360 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003361}
3362
3363
3364LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3365 armnn::IWorkloadFactory& workloadFactory,
3366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3367{
3368 unsigned int shape0[] = { 1, 2, 2, 2 };
3369 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3370
3371 unsigned int shape1[] = { 1, 1, 1, 1 };
3372 std::vector<float> input1({ 5 });
3373
3374 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3375
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003376 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3377 workloadFactory,
3378 memoryManager,
3379 shape0,
3380 input0,
3381 shape1,
3382 input1,
3383 shape0,
3384 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003385}
3386
3387LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3388 armnn::IWorkloadFactory & workloadFactory,
3389 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3390{
3391 const unsigned int shape0[] = { 1, 2, 2, 3 };
3392 const unsigned int shape1[] = { 1, 1, 1, 3 };
3393
3394 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3395 7, 1, 2, 3, 4, 5 });
3396
3397 std::vector<uint8_t> input1({ 1, 2, 3});
3398
3399 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3400 1, 1, 2, 1, 2, 3 });
3401
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003402 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3403 workloadFactory,
3404 memoryManager,
3405 shape0,
3406 input0,
3407 shape1,
3408 input1,
3409 shape0,
3410 output,
3411 1.0f,
3412 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003413}
3414
Sadik Armagan2999a022019-04-09 14:20:12 +01003415LayerTestResult<int16_t, 4> MinimumInt16Test(
3416 armnn::IWorkloadFactory& workloadFactory,
3417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3418{
3419 unsigned int shape[] = { 2, 2, 2, 2 };
3420
3421 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3422 3, 3, 3, 3, 4, 4, 4, 4 });
3423
3424 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3425 4, 4, 4, 4, 5, 5, 5, 5 });
3426
3427 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3428 3, 3, 3, 3, 4, 4, 4, 4 });
3429
3430 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3431 workloadFactory,
3432 memoryManager,
3433 shape,
3434 input0,
3435 shape,
3436 input1,
3437 shape,
3438 output,
3439 1.0f,
3440 0);
3441}
3442
3443LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3444 armnn::IWorkloadFactory& workloadFactory,
3445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3446{
3447 const unsigned int shape0[] = { 1, 2, 2, 3 };
3448 const unsigned int shape1[] = { 1, 1, 1, 1 };
3449
3450 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3451 7, 8, 9, 10, 11, 12 });
3452
3453 std::vector<int16_t> input1({2});
3454
3455 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3456 2, 2, 2, 2, 2, 2 });
3457
3458 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3459 workloadFactory,
3460 memoryManager,
3461 shape0,
3462 input0,
3463 shape1,
3464 input1,
3465 shape0,
3466 output,
3467 1.0f,
3468 0);
3469}
3470
3471LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3472 armnn::IWorkloadFactory& workloadFactory,
3473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3474{
3475 const unsigned int shape0[] = { 1, 2, 2, 3 };
3476 const unsigned int shape1[] = { 1, 1, 1, 3 };
3477
3478 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3479 7, 8, 9, 10, 11, 12 });
3480
3481 std::vector<int16_t> input1({ 1, 10, 3});
3482
3483 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3484 1, 8, 3, 1, 10, 3 });
3485
3486 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3487 workloadFactory,
3488 memoryManager,
3489 shape0,
3490 input0,
3491 shape1,
3492 input1,
3493 shape0,
3494 output,
3495 1.0f,
3496 0);
3497}
3498
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003499namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003500LayerTestResult<float,4> MultiplicationTestHelper(
3501 armnn::IWorkloadFactory& workloadFactory,
3502 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3503 const unsigned int shape0[4],
3504 const std::vector<float> & values0,
3505 const unsigned int shape1[4],
3506 const std::vector<float> & values1,
3507 const unsigned int outShape[4],
3508 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00003509{
surmeh01bceff2f2018-03-29 16:29:27 +01003510 const size_t dimensionCount = 4;
3511 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
3512 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
3513 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00003514
surmeh01bceff2f2018-03-29 16:29:27 +01003515 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
3516 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00003517
3518 LayerTestResult<float,4> ret(outputTensorInfo);
3519
3520 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3521 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3522 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3523
3524 armnn::MultiplicationQueueDescriptor data;
3525 armnn::WorkloadInfo info;
3526 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3527 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3528 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3529
3530 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3531
3532 inputHandle0->Allocate();
3533 inputHandle1->Allocate();
3534 outputHandle->Allocate();
3535
3536 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3537 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3538
Derek Lambertif30f7d32019-04-09 10:25:02 +01003539 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003540 workload->Execute();
3541
3542 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3543
surmeh01bceff2f2018-03-29 16:29:27 +01003544 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00003545 return ret;
3546}
surmeh01bceff2f2018-03-29 16:29:27 +01003547} // anonymous namespace
3548
3549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003550LayerTestResult<float,4> MultiplicationTest(
3551 armnn::IWorkloadFactory& workloadFactory,
3552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003553{
3554 const unsigned int width = 2;
3555 const unsigned int height = 2;
3556 const unsigned int channelCount = 2;
3557 const unsigned int batchSize = 2;
3558
3559 unsigned int shape[] = { batchSize, channelCount, height, width };
3560
3561 std::vector<float> input0({
3562 1, 1, 1, 1, 2, 2, 2, 2,
3563 3, 3, 3, 3, 4, 4, 4, 4 });
3564
3565 std::vector<float> input1({
3566 2, 2, 2, 2, 3, 3, 3, 3,
3567 4, 4, 4, 4, 5, 5, 5, 5 });
3568
3569 std::vector<float> output({
3570 2, 2, 2, 2, 6, 6, 6, 6,
3571 12, 12, 12, 12, 20, 20, 20, 20 });
3572
3573 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003574 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003575 shape,
3576 input0,
3577 shape,
3578 input1,
3579 shape,
3580 output);
3581}
3582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003583LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3584 armnn::IWorkloadFactory& workloadFactory,
3585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003586{
3587 unsigned int shape0[] = { 1, 2, 2, 2 };
3588 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3589
3590 unsigned int shape1[] = { 1, 1, 1, 1 };
3591 std::vector<float> input1({ 2 });
3592
3593 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3594
3595 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003596 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003597 shape0,
3598 input0,
3599 shape1,
3600 input1,
3601 shape0,
3602 output);
3603}
3604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003605LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3606 armnn::IWorkloadFactory& workloadFactory,
3607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003608{
3609 unsigned int shape0[] = { 1, 3, 3, 2 };
3610 std::vector<float> input0({
3611 1, 2, 3, 4, 5, 6,
3612 7, 8, 9, 10, 11, 12,
3613 13, 14, 15, 16, 17, 18});
3614
3615 unsigned int shape1[] = { 1, 1, 1, 2 };
3616 std::vector<float> input1({ 1, 2 });
3617
3618 std::vector<float> output({
3619 1, 4, 3, 8, 5, 12,
3620 7, 16, 9, 20, 11, 24,
3621 13, 28, 15, 32, 17, 36});
3622
3623 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003624 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003625 shape0,
3626 input0,
3627 shape1,
3628 input1,
3629 shape0,
3630 output);
3631}
telsoa014fcda012018-03-09 14:13:49 +00003632
// Runs the same Multiplication workload on both the backend under test
// (workloadFactory) and a reference backend (refWorkloadFactory) with identical
// random inputs, and returns a LayerTestResult whose 'output' comes from the
// tested backend and whose 'outputExpected' comes from the reference backend.
// memoryManager is unused here — NOTE(review): kept for signature parity; confirm.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor reuses 'data' but is rewired to the reference handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Same input data is fed to both backends.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before each Execute.
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3702
// Runs the same BatchNormalization workload on the backend under test and on a
// reference backend, over identical random inputs/parameters, and returns both
// results so the caller can compare them.
//   workloadFactory    - factory for the backend being tested (produces ret.output).
//   memoryManager      - backend memory manager (unused here; kept for a uniform test signature).
//   refWorkloadFactory - factory for the reference backend (produces ret.outputExpected).
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    // NCHW layout: input and output share the same shape; the per-channel
    // batch-norm parameters (mean/variance/beta/gamma) are 1D of size `channels`.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the test deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // Variance is clamped to be >= 0.0f (last argument) as negative variance is meaningless.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // One input/output handle pair per backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor is a copy of `data` with its I/O handles
    // re-pointed at the reference backend's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive the exact same input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // `output` holds the tested backend's result, `outputExpected` the reference result.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3785
// Permutes `inputData` (described by `inputTensorInfo`) according to `mappings`
// by running a Permute workload on the given backend.
// On return:
//   - outputData holds the permuted values (resized to fit), and
//   - inputTensorInfo is OVERWRITTEN with the permuted tensor info, so callers
//     can chain further operations on the permuted tensor.
// memoryManager is unused here; kept for a uniform helper signature.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    // Fetch the permuted data and propagate the permuted shape back to the caller.
    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}
3829
Jim Flynn825af452019-05-20 12:49:28 +01003830armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003831 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3832 unsigned int concatDim)
3833{
telsoa014fcda012018-03-09 14:13:49 +00003834 std::vector<armnn::TensorShape> shapes;
3835 shapes.reserve(inputTensorInfos.size());
3836 for (const armnn::TensorInfo& it: inputTensorInfos)
3837 {
3838 shapes.push_back(it.GetShape());
3839 }
surmeh013537c2c2018-05-18 16:31:43 +01003840
Jim Flynn825af452019-05-20 12:49:28 +01003841 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3842 shapes.end(),
3843 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003844}
3845
//
// Concatenation is only supported for the N and C dimensions in NCHW, and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the third-slowest-iterating one, or the innermost dimension.
//
3851
3852bool NeedPermuteForConcat(
3853 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3854 unsigned int concatDim)
3855{
3856 // See note above. Additionally we expect the input shapes to have the
3857 // same number of dimensions.
3858 unsigned int nDimensions = 0;
3859
telsoa01c577f2c2018-08-31 09:22:23 +01003860 // Determine the number of dimensions as well as sanity check them
3861 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003862 for (auto && tensorInfo : inputTensorInfos)
3863 {
3864 if (!nDimensions)
3865 {
3866 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3867 }
3868 else
3869 {
3870 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3871 "Input shapes must have the same number of dimensions");
3872 }
3873 }
3874
narpra015cdda352018-11-19 15:30:27 +00003875 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003876}
3877
3878armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3879{
3880 unsigned int numDims = inputShape.GetNumDimensions();
3881 if (numDims >= 3)
3882 {
3883 // Nothing to do if the inputShape has at least 3 dimensions.
3884 return inputShape;
3885 }
3886
3887 std::vector<unsigned int> newDims(size_t(3), 1u);
3888 unsigned int expandedBy = 3 - numDims;
3889 for (unsigned int i=0; i<numDims; ++i)
3890 {
3891 newDims[expandedBy+i] = inputShape[i];
3892 }
3893 return armnn::TensorShape(3u, &newDims[0]);
3894}
3895
3896void Generate3dPermuteVectorForConcat(
3897 unsigned int numDimensions,
3898 unsigned int & concatDim,
3899 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3900{
3901 BOOST_ASSERT_MSG(numDimensions <= 3,
3902 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003903 unsigned int expandedBy = 3 - numDimensions;
3904 unsigned int expandedConcatAxis = concatDim + expandedBy;
3905
3906 if (expandedConcatAxis == 2)
3907 {
3908 concatDim = 0;
3909 armnn::PermutationVector forwardPermutation({1, 2, 0});
3910 armnn::PermutationVector reversePermutation({2, 0, 1});
3911 permutations = std::make_pair(forwardPermutation, reversePermutation);
3912 }
3913 else if (expandedConcatAxis == 1)
3914 {
3915 concatDim = 0;
3916 armnn::PermutationVector forwardPermutation({2, 0, 1});
3917 armnn::PermutationVector reversePermutation({1, 2, 0});
3918 permutations = std::make_pair(forwardPermutation, reversePermutation);
3919 }
3920 else
3921 {
3922 BOOST_ASSERT(expandedConcatAxis == 0);
3923 concatDim = 0;
3924 }
3925}
3926
3927//
3928// Permute the input tensors so we can do a supported concatenation.
3929// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3930// at the front. Finally this function tells what the output shape
3931// of the permuted concatenated tensor is going to be.
3932//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,     // in/out: replaced with permuted 3D infos
    std::vector<T *> & inputData,                          // in/out: re-pointed at permuted copies
    std::vector<std::vector<T>> & inputDataStorage,        // out: owns the permuted input copies
    armnn::PermutationVector & permuteVector,              // out: reverse permutation for the output
    unsigned int & concatDim,                              // in/out: becomes the post-permute axis (0)
    armnn::TensorInfo & outputTensorInfo)                  // in/out: shape updated to the permuted layout
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
                     "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the permutation; all others must match its rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation (used later to undo the transform on the output).
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad the shape to 3D, then run the forward permutation on this input.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Point the caller's data/info at the permuted copy.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The concatenated output will be produced in the permuted layout too.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3995
3996
3997//
3998// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003999// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004000// output.
4001//
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,                   // info of the (permuted) concat output
    const armnn::PermutationVector & permuteVector,         // reverse permutation from PermuteInputsForConcat
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle, // handle holding the concat result; consumed here
    T * data)                                               // caller's buffer for the de-permuted result
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the concat result off the device, then apply the reverse permutation.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
4035
4036template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004037void Concatenate(
4038 armnn::IWorkloadFactory& workloadFactory,
4039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4040 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4041 std::initializer_list<T *> inputsOrig,
4042 const armnn::TensorInfo& outputTensorInfoOrig,
4043 T * output,
narpra015cdda352018-11-19 15:30:27 +00004044 unsigned int concatDim,
4045 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004046{
4047 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4048 if (output == nullptr)
4049 {
4050 // Nullptr is an error in the test. By returning without doing the permutation
4051 // I expect the caller to fail the test. It still makes sense to report this as
4052 // an assert for Debug builds.
4053 return;
4054 }
4055
telsoa01c577f2c2018-08-31 09:22:23 +01004056 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004057 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4058 std::vector<T *> inputs = inputsOrig;
4059 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4060
4061 armnn::PermutationVector permuteVector{0, 1, 2};
4062
telsoa01c577f2c2018-08-31 09:22:23 +01004063 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004064 std::vector<std::vector<T>> tmpInputDataStorage;
4065
4066 const size_t inputCount = inputTensorInfos.size();
4067
4068 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4069
4070 if (needPermuteForConcat)
4071 {
4072 //
4073 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004074 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004075 //
4076 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004077 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004078 inputTensorInfos,
4079 inputs,
4080 tmpInputDataStorage,
4081 permuteVector,
4082 concatDim,
4083 outputTensorInfo);
4084 }
4085
narpra015cdda352018-11-19 15:30:27 +00004086 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004087
4088 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4089 inputHandles.reserve(inputCount);
4090
narpra015cdda352018-11-19 15:30:27 +00004091 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4092
Jim Flynne242f2d2019-05-22 14:24:13 +01004093 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004094 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004095 queueDescriptor.m_Parameters = viewsDescriptor;
4096
4097 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004098 {
narpra015cdda352018-11-19 15:30:27 +00004099 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4100 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4101 {
4102 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4103 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4104 }
telsoa014fcda012018-03-09 14:13:49 +00004105
narpra015cdda352018-11-19 15:30:27 +00004106 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004107
narpra015cdda352018-11-19 15:30:27 +00004108 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4109 for (unsigned int i = 0; i < inputCount; ++i)
4110 {
4111 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4112 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4113 subTensorsSupported ?
4114 workloadFactory.CreateSubTensorHandle(*outputHandle,
4115 inputTensorInfo.GetShape(),
4116 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4117 workloadFactory.CreateTensorHandle(inputTensorInfo);
4118
4119 inputHandles.emplace_back(std::move(inputHandle));
4120 }
4121
telsoa014fcda012018-03-09 14:13:49 +00004122 }
narpra015cdda352018-11-19 15:30:27 +00004123 else
4124 {
4125 for (unsigned int i = 0; i < inputCount; ++i)
4126 {
4127 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4128 inputHandles.emplace_back(std::move(inputHandle));
4129 }
4130 }
telsoa014fcda012018-03-09 14:13:49 +00004131
4132 for (unsigned int i = 0; i < inputCount; ++i)
4133 {
surmeh013537c2c2018-05-18 16:31:43 +01004134 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004135 }
4136
4137 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4138
Jim Flynn4ed6c832019-05-20 11:02:46 +01004139 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004140
4141 for (auto& inputHandle : inputHandles)
4142 {
4143 inputHandle->Allocate();
4144 }
4145
4146 outputHandle->Allocate();
4147
4148 unsigned int nextInputId = 0;
4149 for (auto& inputHandle : inputHandles)
4150 {
surmeh013537c2c2018-05-18 16:31:43 +01004151 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4152 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004153 }
4154
Derek Lambertif30f7d32019-04-09 10:25:02 +01004155 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004156 workload->Execute();
4157
surmeh013537c2c2018-05-18 16:31:43 +01004158 if (needPermuteForConcat)
4159 {
4160 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004161 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004162 outputTensorInfo,
4163 permuteVector,
4164 std::move(outputHandle),
4165 output);
4166 }
4167 else
4168 {
4169 CopyDataFromITensorHandle(output, outputHandle.get());
4170 }
telsoa014fcda012018-03-09 14:13:49 +00004171}
4172
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004173template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004174LayerTestResult<T, 1> Concatenation1dTestImpl(
4175 armnn::IWorkloadFactory& workloadFactory,
4176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4177 float qScale,
4178 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004179{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004180 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004181
4182 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4183 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4184 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4185
Jim Flynncbb66aa2019-05-15 13:03:54 +01004186 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004187
4188 LayerTestResult<T, 1> result(outputTensorInfo);
4189
4190 std::vector<T> output;
4191 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004192 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004193 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4194 { input0.data(), input1.data(), input2.data() },
4195 outputTensorInfo,
4196 output.data(),
4197 0,
4198 true);
telsoa014fcda012018-03-09 14:13:49 +00004199
4200 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4201 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4202 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4203 }));
4204
4205 return result;
4206}
4207
// Float32 entry point for the 1D concatenation test (no quantization: scale 0, offset 0).
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4214
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004215template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004216LayerTestResult<T, 2> Concatenation2dTestImpl(
4217 armnn::IWorkloadFactory& workloadFactory,
4218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004219 const armnn::TensorInfo& outputTensorInfo,
4220 unsigned int dimension,
4221 const float qScale,
4222 const int32_t qOffset)
4223{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004224 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004225
4226 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4227 // Batch 0
4228 1.0f, 2.0f, 3.0f,
4229
4230 // Batch 1
4231 10.0f, 11.0f, 12.0f,
4232 }));
4233
4234 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4235 // Batch 0
4236 4.0f, 5.0f, 6.0f,
4237
4238 // Batch 1
4239 13.0f, 14.0f, 15.0f,
4240 }));
4241
4242 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4243 // Batch 0
4244 7.0f, 8.0f, 9.0f,
4245
4246 // Batch 1
4247 16.0f, 17.0f, 18.0f,
4248 }));
4249
4250 LayerTestResult<T, 2> result(outputTensorInfo);
4251
4252 std::vector<T> output;
4253 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004254 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004255 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4256 { input0.data(), input1.data(), input2.data() },
4257 outputTensorInfo,
4258 output.data(),
4259 dimension,
4260 true);
telsoa014fcda012018-03-09 14:13:49 +00004261
4262 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4263 return result;
4264}
4265
// Concatenating three 2x3 tensors along dimension 0 (batch) yields a 6x3 tensor:
// the inputs stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected output: input0's rows, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4300
// Float32 entry point for 2D concatenation along dimension 0 (no quantization).
LayerTestResult<float, 2> Concatenation2dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4307
// Concatenating three 2x3 tensors along dimension 1 yields a 2x9 tensor:
// each batch row is the corresponding rows of the inputs laid end to end.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
4330
// Float32 entry point for 2D concatenation along dimension 1 (no quantization).
LayerTestResult<float, 2> Concatenation2dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4337
// Concatenation along dimension 0 with inputs of DIFFERENT batch counts
// (2x3, 3x3, 1x3) producing a 6x3 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    // Output batch count is the sum of the input batch counts: 2 + 3 + 1 = 6.
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4408
// Float32 entry point for dim-0 concatenation of differently-sized inputs (no quantization).
LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
4416
// Concatenation along dimension 1 with inputs of DIFFERENT widths
// (2x3, 2x5, 2x1) producing a 2x9 output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    // Output width is the sum of the input widths: 3 + 5 + 1 = 9.
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004476LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4477 armnn::IWorkloadFactory& workloadFactory,
4478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004479{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004480 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4481 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004482}
4483
// Common driver for the 3D concatenation tests: three fixed [2, 3, 2] inputs
// (values 1-36 distributed across them) are concatenated along 'dimension'
// into the caller-supplied outputTensorInfo. Only result.output is filled in;
// each caller sets result.outputExpected for its own dimension.
// 'useSubtensor' is forwarded to Concatenate() — presumably it selects the
// sub-tensor (view-based) concat path where supported; confirm against
// Concatenate()'s implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same shape and quantization parameters.
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
4571
// 3D concatenation along dimension 0 (batch): three [2, 3, 2] inputs from
// Concatenation3dTestImpl stacked into a [6, 3, 2] output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 0, sub-tensor path enabled.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4642
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004643LayerTestResult<float, 3> Concatenation3dDim0Test(
4644 armnn::IWorkloadFactory& workloadFactory,
4645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004647 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004648}
4649
// 3D concatenation along dimension 1 (channel): three [2, 3, 2] inputs from
// Concatenation3dTestImpl merged into a [2, 9, 2] output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 1, sub-tensor path enabled.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected: per batch, input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4720
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004721LayerTestResult<float, 3> Concatenation3dDim1Test(
4722 armnn::IWorkloadFactory& workloadFactory,
4723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004724{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004725 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004726}
4727
// 3D concatenation along dimension 2 (innermost): three [2, 3, 2] inputs from
// Concatenation3dTestImpl interleaved into a [2, 3, 6] output.
// 'useSubtensor' is exposed here (unlike the dim-0/dim-1 variants) so both
// concat paths can be exercised by the caller.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    // Expected: per (batch, channel) row, input0's pair, input1's, input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764LayerTestResult<float, 3> Concatenation3dDim2Test(
4765 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4767 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004769 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4770 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004771}
4772
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004773template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004774LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4775 armnn::IWorkloadFactory& workloadFactory,
4776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4777 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004778 int32_t qOffset)
4779{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004780 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004781 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4782 // Batch 0, Channel 0
4783 1.0f, 2.0f,
4784
4785 // Batch 0, Channel 1
4786 3.0f, 4.0f,
4787
4788 // Batch 0, Channel 2
4789 5.0f, 6.0f,
4790
4791 // Batch 1, Channel 0
4792 19.0f, 20.0f,
4793
4794 // Batch 1, Channel 1
4795 21.0f, 22.0f,
4796
4797 // Batch 1, Channel 2
4798 23.0f, 24.0f
4799 }));
4800
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004801 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004802 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4803 // Batch 0, Channel 0
4804 7.0f, 8.0f,
4805
4806 // Batch 0, Channel 1
4807 9.0f, 10.0f,
4808
4809 // Batch 0, Channel 2
4810 11.0f, 12.0f,
4811 }));
4812
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004813 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004814 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4815 // Batch 0, Channel 0
4816 25.0f, 26.0f,
4817
4818 // Batch 0, Channel 1
4819 27.0f, 28.0f,
4820
4821 // Batch 0, Channel 2
4822 29.0f, 30.0f,
4823
4824 // Batch 1, Channel 0
4825 13.0f, 14.0f,
4826
4827 // Batch 1, Channel 1
4828 15.0f, 16.0f,
4829
4830 // Batch 1, Channel 2
4831 17.0f, 18.0f,
4832
4833 // Batch 2, Channel 0
4834 31.0f, 32.0f,
4835
4836 // Batch 2, Channel 1
4837 33.0f, 34.0f,
4838
4839 // Batch 2, Channel 2
4840 35.0f, 36.0f
4841 }));
4842
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004843 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004844 LayerTestResult<T, 3> result(outputTensorInfo);
4845
4846 std::vector<T> output;
4847 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004848 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004849 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4850 { input0.data(), input1.data(), input2.data() },
4851 outputTensorInfo,
4852 output.data(),
4853 0,
4854 true);
telsoa014fcda012018-03-09 14:13:49 +00004855
4856 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4857 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4858 // Batch 0, Channel 0
4859 1.0f, 2.0f,
4860
4861 // Batch 0, Channel 1
4862 3.0f, 4.0f,
4863
4864 // Batch 0, Channel 2
4865 5.0f, 6.0f,
4866
4867 // Batch 1, Channel 0
4868 19.0f, 20.0f,
4869
4870 // Batch 1, Channel 1
4871 21.0f, 22.0f,
4872
4873 // Batch 1, Channel 2
4874 23.0f, 24.0f,
4875
4876 // Batch 2, Channel 0
4877 7.0f, 8.0f,
4878
4879 // Batch 2, Channel 1
4880 9.0f, 10.0f,
4881
4882 // Batch 2, Channel 2
4883 11.0f, 12.0f,
4884
4885 // Batch 3, Channel 0
4886 25.0f, 26.0f,
4887
4888 // Batch 3, Channel 1
4889 27.0f, 28.0f,
4890
4891 // Batch 3, Channel 2
4892 29.0f, 30.0f,
4893
4894 // Batch 4, Channel 0
4895 13.0f, 14.0f,
4896
4897 // Batch 4, Channel 1
4898 15.0f, 16.0f,
4899
4900 // Batch 4, Channel 2
4901 17.0f, 18.0f,
4902
4903 // Batch 5, Channel 0
4904 31.0f, 32.0f,
4905
4906 // Batch 5, Channel 1
4907 33.0f, 34.0f,
4908
4909 // Batch 5, Channel 2
4910 35.0f, 36.0f
4911 }));
4912
4913 return result;
4914}
4915
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004916LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4917 armnn::IWorkloadFactory& workloadFactory,
4918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004919{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004920 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4921 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004922}
4923
// 3D concatenation along dimension 1 (channel) with differently sized inputs:
// [2, 3, 2] + [2, 4, 2] + [2, 1, 2] -> [2, 8, 2].
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    // Channel counts add up: 3 + 4 + 1 = 8.
    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected: per batch, input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5054
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005055LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5056 armnn::IWorkloadFactory& workloadFactory,
5057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005058{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005059 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5060 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005061}
5062
// 3D concatenation along dimension 2 (innermost) with differently sized
// inputs: [2, 3, 2] + [2, 3, 1] + [2, 3, 3] -> [2, 3, 6].
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Innermost extents add up: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected: per (batch, channel) row, input0's pair, input1's single
    // element, input2's triple.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5170
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005171LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5172 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5174 bool useSubtensor)
5175{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005176 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5177 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005178}
5179
// Common driver for the 4D concatenation tests: three fixed [1, 3, 2, 2]
// inputs are concatenated along 'dimension' into the caller-supplied
// outputTensorInfo. Only result.output is filled in; each caller sets
// result.outputExpected for its own dimension.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same shape and quantization parameters.
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5236
// 4D concatenation along dimension 0: three [1, 3, 2, 2] inputs from
// Concatenation4dTestImpl stacked into a [3, 3, 2, 2] output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 0, sub-tensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: input0, input1 and input2 laid out back to back.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5273
5274LayerTestResult<float, 4> Concatenation4dDim0Test(
5275 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005277{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005278 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005279}
5280
// 4D concatenation along dimension 1: three [1, 3, 2, 2] inputs from
// Concatenation4dTestImpl merged into a [1, 9, 2, 2] output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 1, sub-tensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected: the three inputs' dim-1 slices laid out back to back (same
    // flat order as the dim-0 case since the leading extent is 1).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5318
5319LayerTestResult<float, 4> Concatenation4dDim1Test(
5320 armnn::IWorkloadFactory& workloadFactory,
5321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5322{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005323 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005324}
5325
// 4D concatenation along dimension 2: three [1, 3, 2, 2] inputs from
// Concatenation4dTestImpl interleaved into a [1, 3, 6, 2] output.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 2, sub-tensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    // Expected: per dim-1 slice, input0's two dim-2 rows, then input1's,
    // then input2's.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5363
5364LayerTestResult<float, 4> Concatenation4dDim2Test(
5365 armnn::IWorkloadFactory& workloadFactory,
5366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5367{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005368 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005369}
5370
// 4D concatenation along dimension 3 (innermost): three [1, 3, 2, 2] inputs
// from Concatenation4dTestImpl interleaved into a [1, 3, 2, 6] output.
// 'useSubtensor' is exposed so both concat paths can be exercised.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    // Expected: per innermost row, input0's pair, input1's pair, input2's
    // pair interleaved.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5409
5410LayerTestResult<float, 4> Concatenation4dDim3Test(
5411 armnn::IWorkloadFactory& workloadFactory,
5412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5413 bool useSubtensor)
5414{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005415 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5416 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005417}
5418
/// Concatenates two 4-D tensors with different batch sizes along dimension 0
/// (batch): { 1, 3, 2, 2 } + { 2, 3, 2, 2 } -> { 3, 3, 2, 2 }.
/// The expected output is simply input0's data followed by input1's data.
///
/// @param qScale / qOffset  Quantization parameters applied to all tensors
///                          (ignored for float data types).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has two batches instead of one; all other dims match.
    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate is a file-local helper; the final 'true' enables the
    // subtensor path (see its definition earlier in this file).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Batch-axis concat: batch 0 is input0, batches 1-2 come from input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5498
5499LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5500 armnn::IWorkloadFactory& workloadFactory,
5501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5502{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005503 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5504 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005505}
5506
/// Concatenates two 4-D tensors with different channel counts along
/// dimension 1 (channels): { 1, 3, 2, 2 } + { 1, 2, 2, 2 } -> { 1, 5, 2, 2 }.
/// The expected output is input0's three channels followed by input1's two.
///
/// @param qScale / qOffset  Quantization parameters applied to all tensors
///                          (ignored for float data types).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has two channels instead of three; all other dims match.
    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate is a file-local helper; the final 'true' enables the
    // subtensor path (see its definition earlier in this file).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Channel-axis concat: channels 0-2 from input0, channels 3-4 from input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5567
5568LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5569 armnn::IWorkloadFactory& workloadFactory,
5570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5571{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005572 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5573 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005574}
5575
/// Concatenates two 4-D tensors with different heights along dimension 2
/// (height): { 1, 3, 2, 2 } + { 1, 3, 3, 2 } -> { 1, 3, 5, 2 }.
/// Per channel, the expected output holds input0's two rows followed by
/// input1's three rows.
///
/// @param qScale / qOffset  Quantization parameters applied to all tensors
///                          (ignored for float data types).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has height 3 instead of 2; all other dims match.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate is a file-local helper; the final 'true' enables the
    // subtensor path (see its definition earlier in this file).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Height-axis concat: per channel, rows 0-1 from input0, rows 2-4 from input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5647
5648LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5649 armnn::IWorkloadFactory& workloadFactory,
5650 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5651{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005652 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5653 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005654}
5655
/// Concatenates two 4-D tensors with different widths along dimension 3
/// (width): { 1, 3, 2, 2 } + { 1, 3, 2, 3 } -> { 1, 3, 2, 5 }.
/// Each output row holds input0's two elements followed by input1's three.
///
/// @param qScale / qOffset  Quantization parameters applied to all tensors
///                          (ignored for float data types).
/// @param useSubtensor      Selects the subtensor-based concatenation path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has width 3 instead of 2; all other dims match.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate is a file-local helper defined earlier in this file.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Width-axis concat: each width-5 row is input0's pair then input1's triple.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5716
5717LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5718 armnn::IWorkloadFactory& workloadFactory,
5719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5720 bool useSubtensor)
5721{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005722 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5723 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005724}
5725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005726LayerTestResult<float, 2> FakeQuantizationTest(
5727 armnn::IWorkloadFactory& workloadFactory,
5728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005729{
5730 constexpr unsigned int width = 2;
5731 constexpr unsigned int height = 3;
5732
5733 const armnn::TensorInfo tensorInfo({height, width },
5734 armnn::DataType::Float32);
5735 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5736 -10.0f, -5.0f,
5737 0.0f, 5.0f,
5738 10.0f, 10.0f
5739 }));
5740
5741 LayerTestResult<float, 2> ret(tensorInfo);
5742
5743 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5744
5745 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5746
5747 armnn::FakeQuantizationQueueDescriptor data;
5748 armnn::WorkloadInfo info;
5749
5750 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5751 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5752 float min = -10.f;
5753 float max = 10.f;
5754
5755 data.m_Parameters.m_Min = min;
5756 data.m_Parameters.m_Max = max;
5757
5758 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5759 armnn::FakeQuantizationQueueDescriptor refData = data;
5760 armnn::WorkloadInfo refInfo = info;
5761 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5762
5763 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5764
5765 inputHandle->Allocate();
5766 outputHandle->Allocate();
5767
5768 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5769
Derek Lambertif30f7d32019-04-09 10:25:02 +01005770 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005771 workload->Execute();
5772
5773 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5774
5775 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5776 0.0f, 63.0f,
5777 128.0f, 191.0f,
5778 255.0f, 255.0f
5779 }));
5780 return ret;
5781}
5782
namespace
{
/// Runs an L2Normalization workload and compares against caller-supplied
/// expected values. Input/expected data are provided in NCHW order; when the
/// requested layout is NHWC both are permuted before use, so callers always
/// pass NCHW-ordered values regardless of layout.
///
/// @param inputOutputTensorShape  Shape shared by input and output tensors.
/// @param scale / offset          Quantization parameters of the input tensor.
/// @param outScale / outOffset    Quantization parameters of the output tensor.
/// @param layout                  NCHW or NHWC; also written into the descriptor.
/// @param epsilon                 Lower bound fed to the workload via m_Eps.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) float input with the input tensor's params.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // Expected values undergo the same NHWC permutation as the input.
    // (Input and output shapes are identical here, so using the input shape
    // for the permutation is equivalent to using the output shape.)
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

/// Returns 1 / sqrt(sum of squares) of the given values - the factor each
/// element is multiplied by under L2 normalization.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
5866
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005867template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005868LayerTestResult<T, 2> Pad2dTestCommon(
5869 armnn::IWorkloadFactory& workloadFactory,
5870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5871 float qScale,
David Monahan34757812019-06-19 11:47:21 +01005872 int32_t qOffset,
5873 const float customPaddingValue = 0)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005874{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005875 const armnn::TensorShape inputShape{ 3, 3 };
5876 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005877
David Monahan34757812019-06-19 11:47:21 +01005878 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
5879 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005880
Derek Lambertif30f7d32019-04-09 10:25:02 +01005881 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005882 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005883 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005884 // Height (3) x Width (3)
5885 4, 8, 6,
5886 7, 4, 4,
5887 3, 2, 4
5888 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005889
David Monahan34757812019-06-19 11:47:21 +01005890 const T padValue = ConvertToDataType<T>(customPaddingValue, inputTensorInfo);
5891
5892 std::vector<T> expectedOutputValues;
5893 if (padValue == 0)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005894 {
David Monahan34757812019-06-19 11:47:21 +01005895 expectedOutputValues = (
5896 QuantizedVector<T>(qScale, qOffset,
5897 {
5898 0, 0, 0, 0, 0, 0, 0,
5899 0, 0, 0, 0, 0, 0, 0,
5900 0, 0, 4, 8, 6, 0, 0,
5901 0, 0, 7, 4, 4, 0, 0,
5902 0, 0, 3, 2, 4, 0, 0,
5903 0, 0, 0, 0, 0, 0, 0,
5904 0, 0, 0, 0, 0, 0, 0
5905 }));
5906 }
5907 else
5908 {
5909 expectedOutputValues = (
5910 QuantizedVector<T>(qScale, qOffset,
5911 {
5912 1, 1, 1, 1, 1, 1, 1,
5913 1, 1, 1, 1, 1, 1, 1,
5914 1, 1, 4, 8, 6, 1, 1,
5915 1, 1, 7, 4, 4, 1, 1,
5916 1, 1, 3, 2, 4, 1, 1,
5917 1, 1, 1, 1, 1, 1, 1,
5918 1, 1, 1, 1, 1, 1, 1
5919 }));
5920 }
5921
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005922
Derek Lambertif30f7d32019-04-09 10:25:02 +01005923 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005924
Derek Lambertif30f7d32019-04-09 10:25:02 +01005925 LayerTestResult<T, 2> result(outputTensorInfo);
5926 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005927
Derek Lambertif30f7d32019-04-09 10:25:02 +01005928 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5929 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005930
Derek Lambertif30f7d32019-04-09 10:25:02 +01005931 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005932
Derek Lambertif30f7d32019-04-09 10:25:02 +01005933 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5934 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5935 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005936
Derek Lambertif30f7d32019-04-09 10:25:02 +01005937 descriptor.m_Parameters.m_PadList = PadList;
5938 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005939
Derek Lambertif30f7d32019-04-09 10:25:02 +01005940 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5941 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005942
Derek Lambertif30f7d32019-04-09 10:25:02 +01005943 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005944
Derek Lambertif30f7d32019-04-09 10:25:02 +01005945 inputHandle->Allocate();
5946 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005947
Derek Lambertif30f7d32019-04-09 10:25:02 +01005948 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005949
Derek Lambertif30f7d32019-04-09 10:25:02 +01005950 workload->PostAllocationConfigure();
5951 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005952
Derek Lambertif30f7d32019-04-09 10:25:02 +01005953 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005954
Derek Lambertif30f7d32019-04-09 10:25:02 +01005955 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005956}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005957
/// Pads a { 2, 2, 2 } tensor with zeros to { 3, 5, 6 } using an asymmetric
/// pad list: (0,1) on dim 0, (2,1) on dim 1, (2,2) on dim 2 - i.e. one extra
/// (all-zero) trailing channel, and the 2x2 payload shifted two rows down and
/// two columns right within each original channel.
///
/// @param qScale / qOffset  Quantization parameters for input and output
///                          (ignored for float data types).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // Each 5x6 output channel embeds the corresponding 2x2 input block at
    // rows 2-3, columns 2-3; the third channel is entirely padding.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) pad amounts per dimension; see header comment.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006042
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006043template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006044LayerTestResult<T, 4> Pad4dTestCommon(
6045 armnn::IWorkloadFactory& workloadFactory,
6046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6047 float qScale,
6048 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006049{
6050 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6051 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6052
David Monahan34757812019-06-19 11:47:21 +01006053 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6054 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006055
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006056 std::vector<T> inputValues(
6057 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006058 {
6059 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006060 0, 1,
6061 2, 3,
6062 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006063
6064 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006065 6, 7,
6066 8, 9,
6067 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006068
6069 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006070 12, 13,
6071 14, 15,
6072 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006073
6074 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006075 18, 19,
6076 20, 21,
6077 22, 23
6078 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006079
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006080 std::vector<T> expectedOutputValues(
6081 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006082 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006083 0, 0, 0, 0,
6084 0, 0, 0, 0,
6085 0, 0, 0, 0,
6086 0, 0, 0, 0,
6087 0, 0, 0, 0,
6088 0, 0, 0, 0,
6089 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006090
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006091 0, 0, 0, 0,
6092 0, 0, 0, 0,
6093 0, 0, 0, 0,
6094 0, 0, 0, 0,
6095 0, 0, 0, 0,
6096 0, 0, 0, 0,
6097 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006098
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006099 0, 0, 0, 0,
6100 0, 0, 0, 0,
6101 0, 0, 0, 0,
6102 0, 0, 0, 0,
6103 0, 0, 0, 0,
6104 0, 0, 0, 0,
6105 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006106
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006107 0, 0, 0, 0,
6108 0, 0, 0, 0,
6109 0, 0, 0, 0,
6110 0, 0, 0, 0,
6111 0, 0, 0, 0,
6112 0, 0, 0, 0,
6113 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006114
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006115 0, 0, 0, 0,
6116 0, 0, 0, 0,
6117 0, 0, 0, 0,
6118 0, 0, 0, 0,
6119 0, 0, 0, 0,
6120 0, 0, 0, 0,
6121 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006122
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006123 0, 0, 0, 0,
6124 0, 0, 0, 0,
6125 0, 0, 0, 0,
6126 0, 0, 0, 0,
6127 0, 0, 0, 0,
6128 0, 0, 0, 0,
6129 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006130
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006131 0, 0, 0, 0,
6132 0, 0, 0, 0,
6133 0, 0, 0, 0,
6134 0, 0, 0, 0,
6135 0, 0, 0, 0,
6136 0, 0, 0, 0,
6137 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006138
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006139 0, 0, 0, 0,
6140 0, 0, 0, 0,
6141 0, 0, 0, 0,
6142 0, 0, 1, 0,
6143 0, 2, 3, 0,
6144 0, 4, 5, 0,
6145 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006146
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006147 0, 0, 0, 0,
6148 0, 0, 0, 0,
6149 0, 0, 0, 0,
6150 0, 6, 7, 0,
6151 0, 8, 9, 0,
6152 0, 10, 11, 0,
6153 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006154
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006155 0, 0, 0, 0,
6156 0, 0, 0, 0,
6157 0, 0, 0, 0,
6158 0, 0, 0, 0,
6159 0, 0, 0, 0,
6160 0, 0, 0, 0,
6161 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006162
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006163 0, 0, 0, 0,
6164 0, 0, 0, 0,
6165 0, 0, 0, 0,
6166 0, 0, 0, 0,
6167 0, 0, 0, 0,
6168 0, 0, 0, 0,
6169 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006170
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006171 0, 0, 0, 0,
6172 0, 0, 0, 0,
6173 0, 0, 0, 0,
6174 0, 0, 0, 0,
6175 0, 0, 0, 0,
6176 0, 0, 0, 0,
6177 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006178
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006179 0, 0, 0, 0,
6180 0, 0, 0, 0,
6181 0, 0, 0, 0,
6182 0, 12, 13, 0,
6183 0, 14, 15, 0,
6184 0, 16, 17, 0,
6185 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006186
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006187 0, 0, 0, 0,
6188 0, 0, 0, 0,
6189 0, 0, 0, 0,
6190 0, 18, 19, 0,
6191 0, 20, 21, 0,
6192 0, 22, 23, 0,
6193 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006194
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006195 0, 0, 0, 0,
6196 0, 0, 0, 0,
6197 0, 0, 0, 0,
6198 0, 0, 0, 0,
6199 0, 0, 0, 0,
6200 0, 0, 0, 0,
6201 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006202
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006203 0, 0, 0, 0,
6204 0, 0, 0, 0,
6205 0, 0, 0, 0,
6206 0, 0, 0, 0,
6207 0, 0, 0, 0,
6208 0, 0, 0, 0,
6209 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006210
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006211 0, 0, 0, 0,
6212 0, 0, 0, 0,
6213 0, 0, 0, 0,
6214 0, 0, 0, 0,
6215 0, 0, 0, 0,
6216 0, 0, 0, 0,
6217 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006218
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006219 0, 0, 0, 0,
6220 0, 0, 0, 0,
6221 0, 0, 0, 0,
6222 0, 0, 0, 0,
6223 0, 0, 0, 0,
6224 0, 0, 0, 0,
6225 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006226
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006227 0, 0, 0, 0,
6228 0, 0, 0, 0,
6229 0, 0, 0, 0,
6230 0, 0, 0, 0,
6231 0, 0, 0, 0,
6232 0, 0, 0, 0,
6233 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006234
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006235 0, 0, 0, 0,
6236 0, 0, 0, 0,
6237 0, 0, 0, 0,
6238 0, 0, 0, 0,
6239 0, 0, 0, 0,
6240 0, 0, 0, 0,
6241 0, 0, 0, 0
6242 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006243
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006244 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006245
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006246 LayerTestResult<T, 4> result(outputTensorInfo);
6247 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006248
6249 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6250 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6251
6252 armnn::PadQueueDescriptor descriptor;
6253
6254 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6255 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6256 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6257 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6258 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6259
6260 descriptor.m_Parameters.m_PadList = PadList;
6261 armnn::WorkloadInfo info;
6262
6263 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6264 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6265
6266 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6267
6268 inputHandle->Allocate();
6269 outputHandle->Allocate();
6270
6271 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6272
Derek Lambertif30f7d32019-04-09 10:25:02 +01006273 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006274 workload->Execute();
6275
6276 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6277
6278 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006279}
6280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006281LayerTestResult<uint8_t, 2> PadUint82dTest(
6282 armnn::IWorkloadFactory& workloadFactory,
6283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006284{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006285 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006286}
6287
David Monahan34757812019-06-19 11:47:21 +01006288LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6289 armnn::IWorkloadFactory& workloadFactory,
6290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6291{
6292 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
6293}
6294
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006295LayerTestResult<uint8_t, 3> PadUint83dTest(
6296 armnn::IWorkloadFactory& workloadFactory,
6297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006298{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006299 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006300}
6301
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006302LayerTestResult<uint8_t, 4> PadUint84dTest(
6303 armnn::IWorkloadFactory& workloadFactory,
6304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006305{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006306 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006307}
6308
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006309LayerTestResult<float, 2> PadFloat322dTest(
6310 armnn::IWorkloadFactory& workloadFactory,
6311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006312{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006313 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006314}
6315
David Monahan34757812019-06-19 11:47:21 +01006316LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6317 armnn::IWorkloadFactory& workloadFactory,
6318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6319{
6320 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
6321}
6322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006323LayerTestResult<float, 3> PadFloat323dTest(
6324 armnn::IWorkloadFactory& workloadFactory,
6325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006327 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006328}
6329
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006330LayerTestResult<float, 4> PadFloat324dTest(
6331 armnn::IWorkloadFactory& workloadFactory,
6332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006334 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006335}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006336
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006337template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006338LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6339 armnn::IWorkloadFactory& workloadFactory,
6340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6341 float scale,
6342 int32_t offset,
6343 float outScale,
6344 int32_t outOffset,
6345 const armnn::DataLayout layout,
6346 float epsilon)
6347{
6348 // Width: 1
6349 // Height: 1
6350 // Channels: 3
6351 // BatchSize: 1
6352 unsigned int numberOfBatches = 1;
6353 unsigned int numberOfChannels = 3;
6354 unsigned int height = 1;
6355 unsigned int width = 1;
6356
6357 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6358 numberOfBatches, numberOfChannels, height, width, layout);
6359
6360 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6361 std::vector<float> inputValues
6362 {
6363 // Batch 0, Channel 0, Height (1) x Width (1)
6364 0.00000001f,
6365
6366 // Batch 0, Channel 1, Height (1) x Width (1)
6367 0.00000002f,
6368
6369 // Batch 0, Channel 2, Height (1) x Width (1)
6370 0.00000003f,
6371 };
6372
6373 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6374 std::vector<float> expectedOutputValues
6375 {
6376 // Batch 0, Channel 0, Height (1) x Width (1)
6377 0.00000001f * approxInvL2Norm,
6378 0.00000002f * approxInvL2Norm,
6379 0.00000003f * approxInvL2Norm,
6380 };
6381
6382 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6383 inputValues, outScale, outOffset, expectedOutputValues, layout,
6384 epsilon);
6385}
6386
6387
6388template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006389LayerTestResult<T, 4> L2Normalization1dTestCommon(
6390 armnn::IWorkloadFactory& workloadFactory,
6391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006392 float scale,
6393 int32_t offset,
6394 float outScale,
6395 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006396 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006397{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006398 // Width: 1
6399 // Height: 1
6400 // Channels: 10
6401 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006402 unsigned int numberOfBatches = 1;
6403 unsigned int numberOfChannels = 10;
6404 unsigned int height = 1;
6405 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006406
jimfly013aab7c32018-11-12 13:32:08 +00006407
Nina Drozdd41b2592018-11-19 13:03:36 +00006408 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006409 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006410 std::vector<float> inputValues
6411 {
6412 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006413 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006414
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006415 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006416 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006417
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006418 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006419 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006420
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006421 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006422 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006423
6424 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006425 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006426
6427 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006428 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006429
6430 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006431 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006432
6433 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006434 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006435
6436 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006437 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006438
6439 // Batch 0, Channel 9, Height (1) x Width (1)
6440 10.0f
6441 };
telsoa014fcda012018-03-09 14:13:49 +00006442 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006443 std::vector<float> expectedOutputValues
6444 {
6445 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006446 1.0f * approxInvL2Norm,
6447 2.0f * approxInvL2Norm,
6448 3.0f * approxInvL2Norm,
6449 4.0f * approxInvL2Norm,
6450 5.0f * approxInvL2Norm,
6451 6.0f * approxInvL2Norm,
6452 7.0f * approxInvL2Norm,
6453 8.0f * approxInvL2Norm,
6454 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006455 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006456 };
telsoa014fcda012018-03-09 14:13:49 +00006457
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006458
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006459 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6460 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006461}
6462
Ferran Balaguere52211e2019-06-17 12:23:52 +01006463LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6464 armnn::IWorkloadFactory& workloadFactory,
6465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6466 const armnn::DataLayout layout)
6467{
6468 // Dummy descriptor to get the default value of epsilon.
6469 armnn::L2NormalizationDescriptor descriptor;
6470
6471 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6472 layout, descriptor.m_Eps);
6473}
6474
6475LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6476 armnn::IWorkloadFactory& workloadFactory,
6477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6478 const armnn::DataLayout layout)
6479{
6480 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6481 layout, 1e-9f);
6482}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006483
6484LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006485 armnn::IWorkloadFactory& workloadFactory,
6486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006487 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006488{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006489 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006490}
6491
6492LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6493 armnn::IWorkloadFactory& workloadFactory,
6494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6495 const armnn::DataLayout layout)
6496{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006497 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006498 layout);
6499}
6500
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006501LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6502 armnn::IWorkloadFactory& workloadFactory,
6503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6504 const armnn::DataLayout layout)
6505{
6506 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6507 1.f/128, 128, layout);
6508}
6509
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006510template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6511LayerTestResult<T, 4> L2Normalization2dTestCommon(
6512 armnn::IWorkloadFactory& workloadFactory,
6513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006514 float scale,
6515 int32_t offset,
6516 float outScale,
6517 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006518 const armnn::DataLayout layout)
6519{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006520 // Width: 5
6521 // Height: 1
6522 // Channels: 2
6523 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006524 unsigned int numberOfBatches = 1;
6525 unsigned int numberOfChannels = 2;
6526 unsigned int height = 1;
6527 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006528
Nina Drozdd41b2592018-11-19 13:03:36 +00006529 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006530 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006531 std::vector<float> inputValues
6532 {
6533 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006534 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006535
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006536 // Batch 0, Channel 1, Height (1) x Width (5)
6537 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6538 };
6539 std::vector<float> expectedOutputValues
6540 {
6541 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006542 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6543 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6544 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6545 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6546 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006547
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006548 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006549 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6550 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6551 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6552 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006553 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006554 };
telsoa014fcda012018-03-09 14:13:49 +00006555
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006556 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6557 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006558}
telsoa014fcda012018-03-09 14:13:49 +00006559
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006560LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006561 armnn::IWorkloadFactory& workloadFactory,
6562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006563 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006564{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006565 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6566 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006567}
6568
6569LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6570 armnn::IWorkloadFactory& workloadFactory,
6571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6572 const armnn::DataLayout layout)
6573{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006574 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006575 layout);
6576}
6577
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006578LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6579 armnn::IWorkloadFactory& workloadFactory,
6580 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6581 const armnn::DataLayout layout)
6582{
6583 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6584 1.f/128, 128, layout);
6585}
6586
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006587template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6588LayerTestResult<T, 4> L2Normalization3dTestCommon(
6589 armnn::IWorkloadFactory& workloadFactory,
6590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006591 float scale,
6592 int32_t offset,
6593 float outScale,
6594 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006595 const armnn::DataLayout layout)
6596{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006597 // Width: 3
6598 // Height: 4
6599 // Channels: 2
6600 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006601 unsigned int numberOfBatches = 1;
6602 unsigned int numberOfChannels = 2;
6603 unsigned int height = 4;
6604 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006605
Nina Drozdd41b2592018-11-19 13:03:36 +00006606 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006607 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006608 std::vector<float> inputValues
6609 {
6610 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006611 119.0f, 21.0f, 150.0f,
6612 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006613 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006614 147.0f, 199.0f, 220.0f,
6615
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006616 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006617 110.0f, 140.0f, 73.0f,
6618 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006619 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006620 162.0f, 12.0f, 161.0f
6621 };
6622 std::vector<float> expectedOutputValues
6623 {
6624 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006625 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006626 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006627 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6628 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006629 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006630 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006631 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006632 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6633 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6634 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6635 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6636 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6637
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006638 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006639 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6640 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006641 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006642 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6643 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006644 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6645 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006646 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6647 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6648 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006649 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006650 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6651 };
telsoa014fcda012018-03-09 14:13:49 +00006652
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006653 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6654 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006655}
telsoa014fcda012018-03-09 14:13:49 +00006656
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006657LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006658 armnn::IWorkloadFactory& workloadFactory,
6659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006660 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006661{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006662 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6663 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006664}
6665
6666LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6667 armnn::IWorkloadFactory& workloadFactory,
6668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6669 const armnn::DataLayout layout)
6670{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006671 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006672 layout);
6673}
6674
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006675LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6676 armnn::IWorkloadFactory& workloadFactory,
6677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6678 const armnn::DataLayout layout)
6679{
6680 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6681 1.f/128, 128, layout);
6682}
6683
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006684template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6685LayerTestResult<T, 4> L2Normalization4dTestCommon(
6686 armnn::IWorkloadFactory& workloadFactory,
6687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006688 float scale,
6689 int32_t offset,
6690 float outScale,
6691 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006692 const armnn::DataLayout layout)
6693{
6694 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006695 // Height: 4
6696 // Channels: 3
6697 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006698 unsigned int numberOfBatches = 2;
6699 unsigned int numberOfChannels = 3;
6700 unsigned int height = 4;
6701 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006702
Nina Drozdd41b2592018-11-19 13:03:36 +00006703 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006704 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006705 std::vector<float> inputValues
6706 {
6707 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006708 235.0f, 46.0f, 178.0f,
6709 100.0f, 123.0f, 19.0f,
6710 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006711 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00006712
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006713 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006714 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006715 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00006716 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006717 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00006718
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006719 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006720 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00006721 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006722 12.0f, 209.0f, 200.0f,
6723 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00006724
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006725 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006726 67.0f, 90.0f, 49.0f,
6727 7.0f, 163.0f, 18.0f,
6728 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00006729 247.0f, 59.0f, 189.0f,
6730
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006731 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006732 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006733 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00006734 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006735 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00006736
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006737 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006738 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00006739 115.0f, 116.0f, 238.0f,
6740 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006741 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006742 };
6743 std::vector<float> expectedOutputValues
6744 {
6745 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006746 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006747 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006748 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6749 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6750 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006751 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006752 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006753 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006754 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006755 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006756 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006757 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006758
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006759 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006760 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006761 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006762 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006763 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006764 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006765 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006766 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6767 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6768 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006769 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6770 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6771 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006772
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006773 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006774 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006775 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6776 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6777 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006778 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006779 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006780 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006781 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6782 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006783 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6784 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6785 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006786
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006787 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006788 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6789 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6790 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6791 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006792 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006793 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6794 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006795 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6796 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6797 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006798 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006799 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6800
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006801 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006802 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6803 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6804 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006805 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006806 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6807 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6808 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6809 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006810 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6811 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006812 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006813 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006814
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006815 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006816 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006817 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6818 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6819 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6820 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6821 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6822 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006823 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006824 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006825 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006826 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006827 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006828 };
telsoa014fcda012018-03-09 14:13:49 +00006829
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006830 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6831 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006832}
6833
6834LayerTestResult<float, 4> L2Normalization4dTest(
6835 armnn::IWorkloadFactory& workloadFactory,
6836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6837 const armnn::DataLayout layout)
6838{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006839 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6840 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006841}
6842
6843LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
6844 armnn::IWorkloadFactory& workloadFactory,
6845 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6846 const armnn::DataLayout layout)
6847{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006848 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006849 layout);
telsoa014fcda012018-03-09 14:13:49 +00006850}
6851
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006852LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
6853 armnn::IWorkloadFactory& workloadFactory,
6854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6855 const armnn::DataLayout layout)
6856{
6857 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6858 1.f/128, 128, layout);
6859}
6860
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006861template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006862LayerTestResult<T, 4> ConstantTestImpl(
6863 armnn::IWorkloadFactory& workloadFactory,
6864 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00006865 float qScale,
6866 int32_t qOffset)
6867{
6868 constexpr unsigned int inputWidth = 3;
6869 constexpr unsigned int inputHeight = 4;
6870 constexpr unsigned int inputChannels = 3;
6871 constexpr unsigned int inputBatchSize = 2;
6872
6873 constexpr unsigned int outputWidth = inputWidth;
6874 constexpr unsigned int outputHeight = inputHeight;
6875 constexpr unsigned int outputChannels = inputChannels;
6876 constexpr unsigned int outputBatchSize = inputBatchSize;
6877
Nina Drozd58ef2c62019-05-16 12:09:18 +01006878 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6879 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006880
Nina Drozd58ef2c62019-05-16 12:09:18 +01006881 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6882 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006883
6884 // Set quantization parameters if the requested type is a quantized type.
6885 if(armnn::IsQuantizedType<T>())
6886 {
6887 inputTensorInfo.SetQuantizationScale(qScale);
6888 inputTensorInfo.SetQuantizationOffset(qOffset);
6889 outputTensorInfo.SetQuantizationScale(qScale);
6890 outputTensorInfo.SetQuantizationOffset(qOffset);
6891 }
6892
6893 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
6894 QuantizedVector<T>(qScale, qOffset, {
6895 // Batch 0, Channel 0
6896 235.0f, 46.0f, 178.0f,
6897 100.0f, 123.0f, 19.0f,
6898 172.0f, 74.0f, 250.0f,
6899 6.0f, 195.0f, 80.0f,
6900
6901 // Batch 0, Channel 1
6902 113.0f, 95.0f, 202.0f,
6903 77.0f, 114.0f, 71.0f,
6904 122.0f, 246.0f, 166.0f,
6905 82.0f, 28.0f, 37.0f,
6906
6907 // Batch 0, Channel 2
6908 56.0f, 170.0f, 162.0f,
6909 194.0f, 89.0f, 254.0f,
6910 12.0f, 209.0f, 200.0f,
6911 1.0f, 64.0f, 54.0f,
6912
6913 // Batch 1, Channel 0
6914 67.0f, 90.0f, 49.0f,
6915 7.0f, 163.0f, 18.0f,
6916 25.0f, 117.0f, 103.0f,
6917 247.0f, 59.0f, 189.0f,
6918
6919 // Batch 1, Channel 1
6920 239.0f, 104.0f, 199.0f,
6921 17.0f, 124.0f, 153.0f,
6922 222.0f, 217.0f, 75.0f,
6923 32.0f, 126.0f, 21.0f,
6924
6925 // Batch 1, Channel 2
6926 97.0f, 145.0f, 215.0f,
6927 115.0f, 116.0f, 238.0f,
6928 226.0f, 16.0f, 132.0f,
6929 92.0f, 125.0f, 88.0f,
6930 })));
6931
6932 LayerTestResult<T, 4> result(outputTensorInfo);
6933 result.outputExpected = input;
6934
6935 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6936
6937 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
6938 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
6939
6940 armnn::ConstantQueueDescriptor descriptor;
6941 descriptor.m_LayerOutput = &constantTensor;
6942
6943 armnn::WorkloadInfo info;
6944 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6945
6946 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
6947
6948 outputHandle->Allocate();
6949
Derek Lambertif30f7d32019-04-09 10:25:02 +01006950 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006951 workload->Execute();
6952
6953 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6954 return result;
6955}
6956
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006957LayerTestResult<float, 4> ConstantTest(
6958 armnn::IWorkloadFactory& workloadFactory,
6959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006960{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006961 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006962}
6963
Nina Drozd58ef2c62019-05-16 12:09:18 +01006964LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6965 armnn::IWorkloadFactory& workloadFactory,
6966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6967{
6968 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6969}
6970
6971LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006972 armnn::IWorkloadFactory& workloadFactory,
6973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006975 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006976}
6977
// Concatenates two QAsymm8 tensors (2x6x3 and 1x6x3) along the channel
// dimension into a 3x6x3 output. The two inputs carry DIFFERENT quantization
// parameters: the output shares input1's parameters, so input1's values pass
// through unchanged while input2's values must be requantized.
// Note: memoryManager is accepted for signature uniformity but is not used
// in this body.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required.
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels are input1's raw values; the third channel holds
    // input2's values requantized from (scale2, offset2) to (scale1, offset1).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are views directly
    // into the output tensor; otherwise they are standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7120
// Concatenates two QAsymm8 tensors (2x6x3 and 1x6x3) along the channel
// dimension into a 3x6x3 output. All tensors share the same quantization
// parameters, so the operator copies the raw quantized values through
// unchanged.
// Note: memoryManager is accepted for signature uniformity but is not used
// in this body.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output is simply input1's channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are views directly
    // into the output tensor; otherwise they are standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7256
// Concatenates two QSymm16 tensors (2x6x3 and 1x6x3) along the channel
// dimension into a 3x6x3 output. All tensors share the same quantization
// parameters, so the operator copies the raw quantized values through
// unchanged. Mirrors ConcatUint8Test for the 16-bit quantized type.
// Note: memoryManager is accepted for signature uniformity but is not used
// in this body.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output is simply input1's channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are views directly
    // into the output tensor; otherwise they are standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007389
surmeh01bceff2f2018-03-29 16:29:27 +01007390namespace
telsoa014fcda012018-03-09 14:13:49 +00007391{
// Shared driver for the quantized Addition tests.
//
// Builds two quantized input tensors from raw values plus per-tensor
// quantization parameters, runs an Addition workload created by the given
// factory, and returns the actual output alongside the caller-supplied
// expected values. The data type is chosen from T: QuantisedAsymm8 for
// uint8_t, QuantisedSymm16 otherwise.
//
// shape0/values0/scale0/offset0 - first input (rank 4 shape, raw quantized
//                                 values, quantization parameters).
// shape1/values1/scale1/offset1 - second input, described the same way.
// outShape/outValues/outScale/outOffset - expected output description.
// Note: memoryManager is accepted for signature uniformity with the other
// test helpers but is not used in this body.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the element type to the corresponding quantized ArmNN data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire both inputs and the output into the Addition workload descriptor.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
7458} // anonymous namespace
7459
7460LayerTestResult<uint8_t, 4> AdditionUint8Test(
7461 armnn::IWorkloadFactory& workloadFactory,
7462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7463{
7464 const unsigned int shape0[] = { 1, 2, 2, 3 };
7465 const unsigned int shape1[] = { 1, 2, 2, 3 };
7466
7467 std::vector<uint8_t> input0(
7468 {
7469 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7470 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7471 });
7472
7473 std::vector<uint8_t> input1(
7474 {
7475 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7476 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7477 });
7478
7479 std::vector<uint8_t> output(
7480 {
7481 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7482 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7483 });
7484
7485 return AdditionQuantizeTestHelper(workloadFactory,
7486 memoryManager,
7487 shape0, input0, 7.0f, 3,
7488 shape1, input1, 7.0f, 3,
7489 shape0, output, 7.0f, 3);
7490}
7491
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Element-wise addition of two QSymm16 tensors, both quantized with
    // scale 7 / offset 0. Trailing comments show the dequantized values.
    // Unlike the uint8 variant above, nothing saturates here: every sum fits
    // comfortably within the int16 range.
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
        {
            63,  35,  77,  70,  56, 112, //  441,  245,  539,  490,  392,  784
            203, 28, 252, 168, 245, 91   // 1421,  196, 1764, 1176, 1715,  637
        });

    std::vector<int16_t> input1(
        {
            21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
            126, 161,  63,  21, 105, 126 // 882, 1127,  441,  147,  735,  882
        });

    std::vector<int16_t> output(
        {
            84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7523
7524namespace
7525{
7526template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7527LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
7528 armnn::IWorkloadFactory& workloadFactory,
7529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7530 const unsigned int shape0[4],
7531 const std::vector<T> & values0,
7532 float scale0,
7533 int32_t offset0,
7534 const unsigned int shape1[4],
7535 const std::vector<T> & values1,
7536 float scale1,
7537 int32_t offset1,
7538 const unsigned int outShape[4],
7539 const std::vector<T> & outValues,
7540 float outScale,
7541 int32_t outOffset)
7542{
7543 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7544 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7545 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
7546
7547 inputTensorInfo0.SetQuantizationScale(scale0);
7548 inputTensorInfo0.SetQuantizationOffset(offset0);
7549
7550 inputTensorInfo1.SetQuantizationScale(scale1);
7551 inputTensorInfo1.SetQuantizationOffset(offset1);
7552
7553 outputTensorInfo.SetQuantizationScale(outScale);
7554 outputTensorInfo.SetQuantizationOffset(outOffset);
7555
7556 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7557 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7558
7559 LayerTestResult<T, 4> result(outputTensorInfo);
7560 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00007561
surmeh01bceff2f2018-03-29 16:29:27 +01007562 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00007563 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00007564 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7565
7566 armnn::MultiplicationQueueDescriptor data;
7567 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01007568 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7569 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00007570 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7571
7572 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
7573
surmeh01bceff2f2018-03-29 16:29:27 +01007574 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007575 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007576 outputHandle->Allocate();
7577
surmeh01bceff2f2018-03-29 16:29:27 +01007578 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007579 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007580
Derek Lambertif30f7d32019-04-09 10:25:02 +01007581 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007582 workload->Execute();
7583
7584 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7585
7586 return result;
7587}
surmeh01bceff2f2018-03-29 16:29:27 +01007588} // anonymous namespace
7589
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007590LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7591 armnn::IWorkloadFactory& workloadFactory,
7592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007593{
7594 unsigned int batchSize = 1;
7595 unsigned int channels = 2;
7596 unsigned int height = 2;
7597 unsigned int width = 3;
7598 const unsigned int shape[] = { batchSize, channels, height, width };
7599
telsoa01c577f2c2018-08-31 09:22:23 +01007600 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007601 std::vector<uint8_t> input0({
7602 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7603 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7604 });
7605
telsoa01c577f2c2018-08-31 09:22:23 +01007606 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007607 std::vector<uint8_t> input1({
7608 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7609 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7610 });
7611
telsoa01c577f2c2018-08-31 09:22:23 +01007612 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007613 std::vector<uint8_t> output(
7614 {
7615 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7616 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7617 });
7618
Sadik Armagan2999a022019-04-09 14:20:12 +01007619 // Scale/offset chosen to have output values out of range.
7620 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7621 memoryManager,
7622 shape,
7623 input0,
7624 4.0f,
7625 1,
7626 shape,
7627 input1,
7628 3.0f,
7629 -2,
7630 shape,
7631 output,
7632 1366.255f,
7633 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007634}
7635
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007636LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7637 armnn::IWorkloadFactory& workloadFactory,
7638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007639{
7640 const unsigned int shape0[] = { 1, 2, 2, 3 };
7641 const unsigned int shape1[] = { 1, 1, 1, 1 };
7642
7643 std::vector<uint8_t> input0({
7644 1, 2, 3, 4, 5, 6,
7645 7, 8, 9, 10, 11, 12
7646 });
7647
7648 std::vector<uint8_t> input1({2});
7649
7650 std::vector<uint8_t> output({
7651 2, 4, 6, 8, 10, 12,
7652 14, 16, 18, 20, 22, 24
7653 });
7654
Sadik Armagan2999a022019-04-09 14:20:12 +01007655 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7656 memoryManager,
7657 shape0,
7658 input0,
7659 1.0f,
7660 0,
7661 shape1,
7662 input1,
7663 1.0f,
7664 0,
7665 shape0,
7666 output,
7667 1.0f,
7668 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007669}
7670
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007671LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7672 armnn::IWorkloadFactory& workloadFactory,
7673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007674{
7675 const unsigned int shape0[] = { 1, 2, 2, 3 };
7676 const unsigned int shape1[] = { 1, 1, 1, 3 };
7677
7678 std::vector<uint8_t> input0({
7679 1, 2, 3, 4, 5, 6,
7680 7, 8, 9, 10, 11, 12
7681 });
7682
7683 std::vector<uint8_t> input1({1, 2, 3});
7684
7685 std::vector<uint8_t> output({
7686 1, 4, 9, 4, 10, 18,
7687 7, 16, 27, 10, 22, 36
7688 });
7689
Sadik Armagan2999a022019-04-09 14:20:12 +01007690 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7691 memoryManager,
7692 shape0,
7693 input0,
7694 1.0f,
7695 0,
7696 shape1,
7697 input1,
7698 1.0f,
7699 0,
7700 shape0,
7701 output,
7702 1.0f,
7703 0);
7704}
7705
7706LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7707 armnn::IWorkloadFactory& workloadFactory,
7708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7709{
7710 const unsigned int shape[] = { 1, 2, 2, 3 };
7711
7712 std::vector<int16_t> input0(
7713 {
7714 6, 7, 8, 9, 10, 11,
7715 12, 13, 14, 15, 16, 17
7716 });
7717
7718 std::vector<int16_t> input1(
7719 {
7720 1, 2, 3, 4, 5, 6,
7721 7, 8, 9, 10, 11, 12
7722 });
7723
7724 std::vector<int16_t> output(
7725 {
7726 6, 14, 24, 36, 50, 66,
7727 84, 104, 126, 150, 176, 204
7728 });
7729
7730 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7731 memoryManager,
7732 shape,
7733 input0,
7734 1.0f,
7735 0,
7736 shape,
7737 input1,
7738 1.0f,
7739 0,
7740 shape,
7741 output,
7742 1.0f,
7743 0);
7744}
7745
7746LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7747 armnn::IWorkloadFactory& workloadFactory,
7748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7749{
7750 const unsigned int shape0[] = { 1, 2, 2, 3 };
7751 const unsigned int shape1[] = { 1, 1, 1, 1 };
7752
7753 std::vector<int16_t> input0(
7754 {
7755 1, 2, 3, 4, 5, 6,
7756 7, 8, 9, 10, 11, 12
7757 });
7758
7759 std::vector<int16_t> input1({2});
7760
7761 std::vector<int16_t> output(
7762 {
7763 2, 4, 6, 8, 10, 12,
7764 14, 16, 18, 20, 22, 24
7765 });
7766
7767 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7768 memoryManager,
7769 shape0,
7770 input0,
7771 1.0f,
7772 0,
7773 shape1,
7774 input1,
7775 1.0f,
7776 0,
7777 shape0,
7778 output,
7779 1.0f,
7780 0);
7781}
7782
7783LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7784 armnn::IWorkloadFactory& workloadFactory,
7785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7786{
7787 const unsigned int shape0[] = { 1, 2, 2, 3 };
7788 const unsigned int shape1[] = { 1, 1, 1, 3 };
7789
7790 std::vector<int16_t> input0(
7791 {
7792 1, 2, 3, 4, 5, 6,
7793 7, 8, 9, 10, 11, 12
7794 });
7795
7796 std::vector<int16_t> input1({1, 2, 3});
7797
7798 std::vector<int16_t> output(
7799 {
7800 1, 4, 9, 4, 10, 18,
7801 7, 16, 27, 10, 22, 36
7802 });
7803
7804 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7805 memoryManager,
7806 shape0,
7807 input0,
7808 1.0f,
7809 0,
7810 shape1,
7811 input1,
7812 1.0f,
7813 0,
7814 shape0,
7815 output,
7816 1.0f,
7817 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007818}
telsoa014fcda012018-03-09 14:13:49 +00007819
David Beckf195f032018-09-06 16:46:34 +01007820namespace
7821{
Sadik Armagan2999a022019-04-09 14:20:12 +01007822template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007823LayerTestResult<T, 4> SubtractionTestHelper(
7824 armnn::IWorkloadFactory& workloadFactory,
7825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7826 const unsigned int shape0[4],
7827 const std::vector<T>& values0,
7828 float scale0,
7829 int32_t offset0,
7830 const unsigned int shape1[4],
7831 const std::vector<T> & values1,
7832 float scale1,
7833 int32_t offset1,
7834 const unsigned int outShape[4],
7835 const std::vector<T> & outValues,
7836 float outScale,
7837 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01007838{
Sadik Armagan2999a022019-04-09 14:20:12 +01007839 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
7840 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
7841 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01007842
7843 inputTensorInfo0.SetQuantizationScale(scale0);
7844 inputTensorInfo0.SetQuantizationOffset(offset0);
7845
7846 inputTensorInfo1.SetQuantizationScale(scale1);
7847 inputTensorInfo1.SetQuantizationOffset(offset1);
7848
7849 outputTensorInfo.SetQuantizationScale(outScale);
7850 outputTensorInfo.SetQuantizationOffset(outOffset);
7851
7852 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7853 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
7854
7855 LayerTestResult<T, 4> result(outputTensorInfo);
7856 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7857
7858 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7859 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7860 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7861
7862 armnn::SubtractionQueueDescriptor data;
7863 armnn::WorkloadInfo info;
7864 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7865 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7866 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7867
7868 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
7869
7870 inputHandle0->Allocate();
7871 inputHandle1->Allocate();
7872 outputHandle->Allocate();
7873
7874 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7875 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7876
Derek Lambertif30f7d32019-04-09 10:25:02 +01007877 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01007878 workload->Execute();
7879
7880 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7881
7882 return result;
7883}
7884} // anonymous namespace
7885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007886LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7887 armnn::IWorkloadFactory& workloadFactory,
7888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007889{
7890 const unsigned int shape0[] = { 1, 1, 2, 2 };
7891 const unsigned int shape1[] = { 1, 1, 2, 2 };
7892
7893 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7894 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7895 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7896
Sadik Armagan2999a022019-04-09 14:20:12 +01007897 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7898 memoryManager,
7899 shape0, input0, 0.5f, 2,
7900 shape1, input1, 1.0f, 0,
7901 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007902}
7903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007904LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7905 armnn::IWorkloadFactory& workloadFactory,
7906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007907{
7908 const unsigned int shape0[] = { 1, 1, 2, 2 };
7909 const unsigned int shape1[] = { 1, 1, 1, 1 };
7910
7911 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7912 std::vector<uint8_t> input1({ 2 });
7913 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7914
Sadik Armagan2999a022019-04-09 14:20:12 +01007915 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7916 memoryManager,
7917 shape0, input0, 0.5f, 2,
7918 shape1, input1, 1.0f, 0,
7919 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007920}
7921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007922LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7923 armnn::IWorkloadFactory& workloadFactory,
7924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007925{
7926 const unsigned int shape0[] = { 1, 1, 2, 2 };
7927 const unsigned int shape1[] = { 1, 1, 2, 1 };
7928
7929 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7930 std::vector<uint8_t> input1({ 2, 1 });
7931 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7932
Sadik Armagan2999a022019-04-09 14:20:12 +01007933 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7934 memoryManager,
7935 shape0, input0, 1.0f, 0,
7936 shape1, input1, 1.0f, 0,
7937 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007938}
7939
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007940LayerTestResult<float, 4> SubtractionTest(
7941 armnn::IWorkloadFactory& workloadFactory,
7942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007943{
7944 const unsigned int shape0[] = { 1, 1, 2, 2 };
7945 const unsigned int shape1[] = { 1, 1, 2, 2 };
7946
7947 std::vector<float> input0({ 1, 2, 3, 4 });
7948 std::vector<float> input1({ 1, -1, 0, 2 });
7949 std::vector<float> output({ 0, 3, 3, 2 });
7950
Sadik Armagan2999a022019-04-09 14:20:12 +01007951 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7952 memoryManager,
7953 shape0, input0, 1.0f, 0,
7954 shape1, input1, 1.0f, 0,
7955 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007956}
7957
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007958LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7959 armnn::IWorkloadFactory& workloadFactory,
7960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007961{
7962 const unsigned int shape0[] = { 1, 1, 2, 2 };
7963 const unsigned int shape1[] = { 1, 1, 1, 1 };
7964
7965 std::vector<float> input0({ 1, 2, 3, 4 });
7966 std::vector<float> input1({ 10 });
7967 std::vector<float> output({ -9, -8, -7, -6 });
7968
Sadik Armagan2999a022019-04-09 14:20:12 +01007969 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7970 memoryManager,
7971 shape0, input0, 1.0f, 0,
7972 shape1, input1, 1.0f, 0,
7973 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007974}
7975
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007976LayerTestResult<float, 4> SubtractionBroadcastTest(
7977 armnn::IWorkloadFactory& workloadFactory,
7978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007979{
7980 const unsigned int shape0[] = { 1, 1, 2, 2 };
7981 const unsigned int shape1[] = { 1, 1, 1, 2 };
7982
7983 std::vector<float> input0({ 1, 2, 3, 4 });
7984 std::vector<float> input1({ 10, -5 });
7985 std::vector<float> output({ -9, 7, -7, 9 });
7986
Sadik Armagan2999a022019-04-09 14:20:12 +01007987 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7988 memoryManager,
7989 shape0, input0, 1.0f, 0,
7990 shape1, input1, 1.0f, 0,
7991 shape0, output, 1.0f, 0);
7992}
7993
LayerTestResult<int16_t, 4> SubtractionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 subtraction with differing input scales (0.5 vs 1.0).
    // NOTE(review): with scale 0.5 and offset 0, input0 dequantizes to
    // { 5, 6, 7, 8 }, which would make the expected difference { 4, 4, 6, 6 }
    // rather than the { 3, 3, 5, 5 } listed below; the listed values match the
    // uint8 variant above, which uses offset 2 on input0. Verify the expected
    // output against the reference backend.
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 1, 2, 1, 2 });
    std::vector<int16_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 0.5f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8011
8012LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8013 armnn::IWorkloadFactory& workloadFactory,
8014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8015{
8016 const unsigned int shape0[] = { 1, 1, 2, 2 };
8017 const unsigned int shape1[] = { 1, 1, 1, 1 };
8018
8019 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8020 std::vector<int16_t> input1({ 2 });
8021 std::vector<int16_t> output({ 3, 4, 5, 6 });
8022
8023 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8024 memoryManager,
8025 shape0, input0, 0.5f, 0,
8026 shape1, input1, 1.0f, 0,
8027 shape0, output, 1.0f, 0);
8028}
8029
8030LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8031 armnn::IWorkloadFactory& workloadFactory,
8032 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8033{
8034 const unsigned int shape0[] = { 1, 1, 2, 2 };
8035 const unsigned int shape1[] = { 1, 1, 2, 1 };
8036
8037 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8038 std::vector<int16_t> input1({ 2, 1 });
8039 std::vector<int16_t> output({ 8, 11, 12, 15 });
8040
8041 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8042 memoryManager,
8043 shape0, input0, 1.0f, 0,
8044 shape1, input1, 1.0f, 0,
8045 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008046}
8047
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 batch normalization in NCHW layout. The expected values
    // correspond to the mean/variance/beta/gamma constants defined inside
    // BatchNormTestImpl (channel 0 is unchanged, channel 1 is transformed) —
    // see BatchNormTestImpl.hpp for the parameters.
    // The trailing (0.f, 0) are the quantization scale/offset arguments of the
    // helper; presumably unused for Float32 — confirm in BatchNormTestImpl.
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
8088
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 batch normalization in NHWC layout: same data as BatchNormTest,
    // re-ordered channel-last, so the two tests must produce matching results.
    // The trailing (0.f, 0) are the helper's quantization scale/offset;
    // presumably unused for Float32 — confirm in BatchNormTestImpl.
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
8133
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QAsymm8 batch normalization in NCHW layout. Input/expected values are
    // given as floats; the helper quantizes them with scale 1/20 and offset 50.
    // Same data and expected results as the Float32 BatchNormTest above.
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8174
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QAsymm8 batch normalization in NHWC layout: same data as
    // BatchNormUint8Test re-ordered channel-last, quantized by the helper
    // with scale 1/20 and offset 50.
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
        (workloadFactory, memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
         1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8219
LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 batch normalization in NCHW layout. Input/expected values are
    // given as floats; the helper quantizes them with scale 1/20 and offset 50.
    // Same data and expected results as the Float32 BatchNormTest above.
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8260
LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 batch normalization in NHWC layout: same data as
    // BatchNormInt16Test re-ordered channel-last, quantized by the helper
    // with scale 1/20 and offset 50.
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
        (workloadFactory, memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
         1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8305
Nina Drozd58ef2c62019-05-16 12:09:18 +01008306LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008307 armnn::IWorkloadFactory& workloadFactory,
8308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008309{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008310 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008311}
8312
Nina Drozd58ef2c62019-05-16 12:09:18 +01008313LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8314 armnn::IWorkloadFactory& workloadFactory,
8315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8316{
8317 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8318}
8319
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008320LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8321 armnn::IWorkloadFactory& workloadFactory,
8322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008323{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008324 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008325}
8326
//
// 2D concatenation wrappers, QuantisedAsymm8 flavour. Each pins the
// quantisation parameters (0.5f, -1) and forwards to the typed TestImpl.
//

// Concatenate two 2D tensors along dimension 0.
LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// Concatenate two 2D tensors along dimension 1.
LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// Dimension-0 concatenation, inputs with differing dimensions (per impl name).
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

// Dimension-1 concatenation, inputs with differing dimensions (per impl name).
LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}
8356
//
// 3D concatenation wrappers, QuantisedAsymm8 flavour (quantisation
// parameters fixed at 0.5f, -1).
//

// Concatenate 3D tensors along dimension 0.
LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// Concatenate 3D tensors along dimension 1.
LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// Concatenate 3D tensors along dimension 2; useSubtensor selects the
// sub-tensor code path in the implementation.
LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
8379
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008380LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8381 armnn::IWorkloadFactory& workloadFactory,
8382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008383{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008384 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008385}
8386
// Dimension-1 concatenation of 3D tensors with differing input dimensions.
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

// Dimension-2 concatenation of 3D tensors with differing input dimensions;
// useSubtensor selects the sub-tensor code path in the implementation.
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
8403
//
// 4D concatenation wrappers, QuantisedAsymm8 flavour (quantisation
// parameters fixed at 0.5f, -1), one per concatenation dimension.
//

LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

// Dimension 3 additionally takes useSubtensor (passed last to the impl,
// after the quantisation parameters).
LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
    return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
8432
//
// 4D concatenation wrappers with differently-shaped inputs (per impl names),
// QuantisedAsymm8 flavour (quantisation parameters fixed at 0.5f, -1).
//

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

// Dimension 3 additionally takes useSubtensor (passed last to the impl).
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
8465
//
// Max pooling, size 2x2 / stride 2x2 (per impl name): Float32, QuantisedAsymm8
// and QuantisedSymm16 flavours. Only the uint8 flavour passes explicit
// quantisation parameters (3.0f, -5); the others use the impl defaults.
//

LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8492
//
// Max pooling, size 3x3 / stride 2x4 (per impl name): Float32, QuantisedAsymm8
// and QuantisedSymm16 flavours. Only the uint8 flavour passes explicit
// quantisation parameters (0.1f, 128).
//

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8519
//
// Simple max pooling over a selectable data layout (NCHW/NHWC via the
// dataLayout argument): Float32, QuantisedAsymm8 and QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
//
// IgnorePadding simple max pooling: Float32, QuantisedAsymm8 (explicit
// quantisation parameters 1.0f, -5) and QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8565
//
// IgnorePadding max pooling, size-3 kernel (per impl name): Float32,
// QuantisedAsymm8 (explicit quantisation parameters 1.0f, -5) and
// QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8588
//
// Simple average pooling over a selectable data layout: Float32,
// QuantisedAsymm8 (explicit quantisation parameters 0.5, -1) and
// QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, dataLayout, 0.5, -1);
}

LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, dataLayout);
}
8614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008615LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8616 armnn::IWorkloadFactory& workloadFactory,
8617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8618 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008619{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008620 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008621 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008622}
8623
//
// Average pooling over large tensors (per impl name): Float32, QuantisedAsymm8
// (explicit quantisation parameters 0.5, -1) and QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5, -1);
}

LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
//
// IgnorePadding simple average pooling: Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours, all using the impl's default quantisation.
//
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8668
//
// IgnorePadding simple average pooling, no-padding variant (per impl name):
// Float32, QuantisedAsymm8 and QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8692
//
// IgnorePadding average pooling, size-3 kernel (per impl name): Float32,
// QuantisedAsymm8 and QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8715
//
// Simple L2 pooling over a selectable data layout: Float32, QuantisedAsymm8
// and QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
8739
//
// L2 pooling, size 3 / stride 1 (per impl name): Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8760
//
// L2 pooling, size 3 / stride 3 (per impl name): Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
//
// L2 pooling, size 3 / stride 4 (per impl name): Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8801
//
// L2 pooling, size-7 kernel (per impl name): Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8822
//
// L2 pooling, size-9 kernel (per impl name): Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//

LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
//
// IgnorePadding simple L2 pooling: Float32, QuantisedAsymm8 and
// QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8863
//
// IgnorePadding L2 pooling, size-3 kernel (per impl name): Float32,
// QuantisedAsymm8 and QuantisedSymm16 flavours.
//
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8884
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008885LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8886 armnn::IWorkloadFactory& workloadFactory,
8887 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008888{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008889 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008890}
8891
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008892LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8893 armnn::IWorkloadFactory& workloadFactory,
8894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008895{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008896 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008897}
8898
Teresa Charlin0434df62019-06-06 13:40:35 +01008899LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
8900 armnn::IWorkloadFactory& workloadFactory,
8901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8902{
8903 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
8904}
8905
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008906LayerTestResult<float, 4> ComparePooling2dTest(
8907 armnn::IWorkloadFactory& workloadFactory,
8908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8909 armnn::IWorkloadFactory& refWorkloadFactory,
8910 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008911{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008912 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008913 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008914}
8915
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008916LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8917 armnn::IWorkloadFactory& workloadFactory,
8918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8919 armnn::IWorkloadFactory& refWorkloadFactory,
8920 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008921{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008922 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008923 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008924}
8925
Teresa Charlin0434df62019-06-06 13:40:35 +01008926LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
8927 armnn::IWorkloadFactory& workloadFactory,
8928 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8929 armnn::IWorkloadFactory& refWorkloadFactory,
8930 armnn::PoolingAlgorithm poolingType)
8931{
8932 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8933 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
8934}
8935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008936LayerTestResult<float, 2> FullyConnectedLargeTest(
8937 armnn::IWorkloadFactory& workloadFactory,
8938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8939 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008940{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008941 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008942}
8943
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008944LayerTestResult<float, 4> SimplePermuteFloat32Test(
8945 armnn::IWorkloadFactory& workloadFactory,
8946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008947{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008948 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008949};
8950
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008951LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8952 armnn::IWorkloadFactory& workloadFactory,
8953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008954{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008955 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008956};
surmeh01bceff2f2018-03-29 16:29:27 +01008957
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008958LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8959 armnn::IWorkloadFactory& workloadFactory,
8960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008961{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008962 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008963};
8964
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008965LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8966 armnn::IWorkloadFactory& workloadFactory,
8967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008968{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008969 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008970};
8971
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008972LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8973 armnn::IWorkloadFactory& workloadFactory,
8974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008975{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008976 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008977};
8978
// Builds a tiny two-workload pipeline by hand: a 1x1/stride-2 MaxPool over a 3x3
// input, whose output tensor handle is then fed (together with a second constant
// tensor) into an Addition workload. Verifies the chained result against the
// expected sums. Unlike most tests in this file this one does not delegate to a
// *TestCommon helper.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer shaped like the pooling output; used below to round-trip the
    // pooling output handle's contents.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // The addition reads the pooling workload's output handle directly, so the two
    // workloads are chained through device memory rather than through host copies.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads poolingOutputHandle BEFORE workload->Execute() has run,
    // so resultMaxPool holds whatever the freshly-allocated buffer contains, and the
    // copy straight back on the next line is a no-op round-trip. It does not affect
    // the final result (the addition runs after the pooling executes below), but it
    // looks like dead code — confirm and consider removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute the pooling first, then the addition that consumes its output handle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009084LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9085 armnn::IWorkloadFactory& workloadFactory,
9086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009087{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009088 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009089}
9090
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009091LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9092 armnn::IWorkloadFactory& workloadFactory,
9093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009094{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009095 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009096}
9097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009098LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9099 armnn::IWorkloadFactory& workloadFactory,
9100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009101{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009102 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009103}
9104
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009105LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9106 armnn::IWorkloadFactory& workloadFactory,
9107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009108{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009109 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009110}
9111
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009112LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9113 armnn::IWorkloadFactory& workloadFactory,
9114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009115{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009116 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009117}
9118
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009119LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9120 armnn::IWorkloadFactory& workloadFactory,
9121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009122{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009123 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009124}
9125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009126LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9127 armnn::IWorkloadFactory& workloadFactory,
9128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009129{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009130 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009131}
9132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009133LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9134 armnn::IWorkloadFactory& workloadFactory,
9135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009136{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009137 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009138}
9139
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009140LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9141 armnn::IWorkloadFactory& workloadFactory,
9142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009143{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009144 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009145}
9146
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009147LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9148 armnn::IWorkloadFactory& workloadFactory,
9149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009150{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009151 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009152}
9153
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009154LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9155 armnn::IWorkloadFactory& workloadFactory,
9156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009157{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009158 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009159}
9160
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009161LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9162 armnn::IWorkloadFactory& workloadFactory,
9163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009164{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009165 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009166}
9167
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009168LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9169 armnn::IWorkloadFactory& workloadFactory,
9170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009171{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009172 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009173}
9174
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009175LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9176 armnn::IWorkloadFactory& workloadFactory,
9177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009178{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009179 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009180}
9181
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009182LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9183 armnn::IWorkloadFactory& workloadFactory,
9184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009186 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009187}
9188
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009189LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9190 armnn::IWorkloadFactory& workloadFactory,
9191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009193 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009194}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009195
nikraj01120522a2019-05-31 11:33:07 +01009196LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9197 armnn::IWorkloadFactory& workloadFactory,
9198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9199{
9200 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9201}
9202
9203LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9204 armnn::IWorkloadFactory& workloadFactory,
9205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9206{
9207 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9208}
9209
9210LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9211 armnn::IWorkloadFactory& workloadFactory,
9212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9213{
9214 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9215}
9216
9217LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9218 armnn::IWorkloadFactory& workloadFactory,
9219 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9220{
9221 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9222}
9223
9224LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9225 armnn::IWorkloadFactory& workloadFactory,
9226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9227{
9228 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9229}
9230
9231LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9232 armnn::IWorkloadFactory& workloadFactory,
9233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9234{
9235 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9236}
9237
9238LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9239 armnn::IWorkloadFactory& workloadFactory,
9240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9241{
9242 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9243}
9244
9245LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9246 armnn::IWorkloadFactory& workloadFactory,
9247 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9248{
9249 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9250}
9251
Keith Davisa57eccb2019-06-14 17:33:22 +01009252
9253LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9254 armnn::IWorkloadFactory& workloadFactory,
9255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9256{
9257 return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
9258 workloadFactory,
9259 memoryManager);
9260}
9261
9262LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9263 armnn::IWorkloadFactory& workloadFactory,
9264 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9265{
9266 return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
9267 workloadFactory,
9268 memoryManager,
9269 armnn::DataLayout::NCHW);
9270}
9271
9272LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test(
9273 armnn::IWorkloadFactory& workloadFactory,
9274 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9275{
9276 return SpaceToDepthFloatTest<armnn::DataType::Float32>(
9277 workloadFactory,
9278 memoryManager);
9279}
9280
9281LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test(
9282 armnn::IWorkloadFactory& workloadFactory,
9283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9284{
9285 return SpaceToDepthFloatTest<armnn::DataType::Float32>(
9286 workloadFactory,
9287 memoryManager,
9288 armnn::DataLayout::NCHW);
9289}
9290
namespace {
// NOTE(review): intentionally left empty — appears to be a leftover after
// file-local helpers were moved elsewhere; candidate for removal.
} // anonymous namespace
9294
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009295LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9296 armnn::IWorkloadFactory& workloadFactory,
9297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9298{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009299 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009300}
9301
9302LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9303 armnn::IWorkloadFactory& workloadFactory,
9304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9305{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009306 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009307}
9308
9309LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9310 armnn::IWorkloadFactory& workloadFactory,
9311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9312{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009313 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009314}
9315
9316LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9317 armnn::IWorkloadFactory& workloadFactory,
9318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9319{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009320 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009321}
9322
9323LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9324 armnn::IWorkloadFactory& workloadFactory,
9325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009327 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009328}
9329
9330LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9331 armnn::IWorkloadFactory& workloadFactory,
9332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009334 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009335}
9336
9337LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9338 armnn::IWorkloadFactory& workloadFactory,
9339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9340{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009341 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009342}
9343
9344LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9345 armnn::IWorkloadFactory& workloadFactory,
9346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009348 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009349}
9350
9351LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9352 armnn::IWorkloadFactory& workloadFactory,
9353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9354{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009355 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009356}
9357
9358LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9359 armnn::IWorkloadFactory& workloadFactory,
9360 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9361{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009362 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009363}
9364
9365LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9366 armnn::IWorkloadFactory& workloadFactory,
9367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9368{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009369 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009370}
9371
9372LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9373 armnn::IWorkloadFactory& workloadFactory,
9374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9375{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009376 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009377}
9378
9379LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9380 armnn::IWorkloadFactory& workloadFactory,
9381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9382{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009383 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009384}
9385
9386LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9387 armnn::IWorkloadFactory& workloadFactory,
9388 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9389{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009390 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009391}
9392
9393LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9394 armnn::IWorkloadFactory& workloadFactory,
9395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9396{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009397 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009398}
9399
9400LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9401 armnn::IWorkloadFactory& workloadFactory,
9402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9403{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009404 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009405}
9406
9407LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9408 armnn::IWorkloadFactory& workloadFactory,
9409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009411 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009412}
9413
9414LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9415 armnn::IWorkloadFactory& workloadFactory,
9416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9417{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009418 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009419}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009420
Matteo Martincigh42666a12019-05-29 08:53:41 +01009421LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9422 armnn::IWorkloadFactory& workloadFactory,
9423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9424{
9425 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9426}
9427
9428LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9429 armnn::IWorkloadFactory& workloadFactory,
9430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9431{
9432 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9433}
9434
9435LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9436 armnn::IWorkloadFactory& workloadFactory,
9437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9438{
9439 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9440}
9441
9442LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9443 armnn::IWorkloadFactory& workloadFactory,
9444 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9445{
9446 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9447}
9448
9449LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9450 armnn::IWorkloadFactory& workloadFactory,
9451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9452{
9453 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9454}
9455
9456LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9457 armnn::IWorkloadFactory& workloadFactory,
9458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9459{
9460 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9461}
9462
9463LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9464 armnn::IWorkloadFactory& workloadFactory,
9465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9466{
9467 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9468}
9469
9470LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9471 armnn::IWorkloadFactory& workloadFactory,
9472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9473{
9474 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9475}
9476
9477LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9478 armnn::IWorkloadFactory& workloadFactory,
9479 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9480{
9481 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9482}
9483
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009484LayerTestResult<float, 4> Debug4DFloat32Test(
9485 armnn::IWorkloadFactory& workloadFactory,
9486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9487{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009488 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009489}
9490
9491LayerTestResult<float, 3> Debug3DFloat32Test(
9492 armnn::IWorkloadFactory& workloadFactory,
9493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9494{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009495 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009496}
9497
9498LayerTestResult<float, 2> Debug2DFloat32Test(
9499 armnn::IWorkloadFactory& workloadFactory,
9500 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9501{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009502 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009503}
9504
9505LayerTestResult<float, 1> Debug1DFloat32Test(
9506 armnn::IWorkloadFactory& workloadFactory,
9507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9508{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009509 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009510}
9511
9512LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9513 armnn::IWorkloadFactory& workloadFactory,
9514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9515{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009516 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009517}
9518
9519LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9520 armnn::IWorkloadFactory& workloadFactory,
9521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9522{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009523 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009524}
9525
9526LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9527 armnn::IWorkloadFactory& workloadFactory,
9528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9529{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009530 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009531}
9532
9533LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9534 armnn::IWorkloadFactory& workloadFactory,
9535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9536{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009537 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009538}
Matteo Martincigh49124022019-01-11 13:25:59 +00009539
narpra014951d842019-01-18 16:53:53 +00009540LayerTestResult<float, 1> Gather1DParamsFloatTest(
9541 armnn::IWorkloadFactory& workloadFactory,
9542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9543{
9544 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9545}
9546
9547LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9548 armnn::IWorkloadFactory& workloadFactory,
9549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9550{
9551 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9552}
9553
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009554LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
9555 armnn::IWorkloadFactory& workloadFactory,
9556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9557{
9558 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9559}
9560
narpra014951d842019-01-18 16:53:53 +00009561LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9562 armnn::IWorkloadFactory& workloadFactory,
9563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9564{
9565 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9566}
9567
9568LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9569 armnn::IWorkloadFactory& workloadFactory,
9570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9571{
9572 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9573}
9574
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009575LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
9576 armnn::IWorkloadFactory& workloadFactory,
9577 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9578{
9579 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9580}
9581
narpra014951d842019-01-18 16:53:53 +00009582LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9583 armnn::IWorkloadFactory& workloadFactory,
9584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9585{
9586 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9587}
9588
9589LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9590 armnn::IWorkloadFactory& workloadFactory,
9591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9592{
9593 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9594 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009595}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009596
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009597LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
9598 armnn::IWorkloadFactory& workloadFactory,
9599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9600{
9601 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
9602 workloadFactory, memoryManager);
9603}
9604
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009605LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009606 armnn::IWorkloadFactory& workloadFactory,
9607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9608{
9609 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9610}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009611
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009612LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9613 armnn::IWorkloadFactory& workloadFactory,
9614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9615{
9616 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9617}
9618
9619LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9620 armnn::IWorkloadFactory& workloadFactory,
9621 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9622{
9623 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9624}
9625
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009626LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9627 armnn::IWorkloadFactory& workloadFactory,
9628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9629{
9630 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9631}
9632
9633LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9634 armnn::IWorkloadFactory& workloadFactory,
9635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9636{
9637 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9638}
9639
9640LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9641 armnn::IWorkloadFactory& workloadFactory,
9642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9643{
9644 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9645}