blob: d9ae546739a309aa3b3126090db47160fe7dfd5a [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// NCHW order: channel 0 is 0.5f with one all-zero row, channel 1 carries a single
// vertical stripe of ones in column 2, channel 2 is uniformly -1.
static std::vector<float> ConvInput3x8x16 =
{
    // Channel 0: 0.5f everywhere except the second row, which is all zeros.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,

    // Channel 1: a vertical line of ones in column 2.
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,

    // Channel 2: uniformly -1.
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,
    -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f
};
76
// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2 = { 0.0f, 2.0f };
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000096template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000097LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
98 armnn::IWorkloadFactory& workloadFactory,
99 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
100 float qScale,
101 int32_t qOffset,
102 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000103 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000104{
telsoa01c577f2c2018-08-31 09:22:23 +0100105 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000106 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000107 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
108
telsoa01c577f2c2018-08-31 09:22:23 +0100109 // Use a 2-element batch with 3-channel 3x5 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000110 armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000111 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
112 QuantizedVector<T>(qScale, qOffset, {
113 1, 1, 1,
114 1, -1, 1,
115 1, 1, 1,
116 1, 1, 1,
117 1, 1, 1,
118
119 0, 0, 0,
120 0, 0, 0,
121 0, 0, 0,
122 0, 0, 0,
123 0, 0, 0,
124
125 2, 2, 2,
126 2, 2, 2,
127 2, 2, 2,
128 2, 2, 2,
129 2, 2, 2,
130
131
132 0, 0, 0,
133 0, 0, 0,
134 0, 0, 0,
135 0, 0, 0,
136 0, 0, 0,
137
138 1, 1, 1,
139 1, 1, 1,
140 1, 1, 1,
141 1, 1, 1,
142 1, 1, 1,
143
144 0, 0, 0,
145 0, 0, 0,
146 0, 0, 0,
147 0, 0, 0,
148 0, 0, 0
149 })));
150
telsoa01c577f2c2018-08-31 09:22:23 +0100151 // Expected output is 2 batch elements of a 1-channel 14x4 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000152 armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000153 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
154 QuantizedVector<T>(qScale, qOffset, {
155 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
156 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
157 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
158 -23.5f, -23.5f, -23.5f,
159 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
160 -23.5f, -23.5f, -23.5f,
161
162 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
163 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
164 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
165 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
166 })));
167
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000168 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
169 workloadFactory,
170 memoryManager,
171 input,
172 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100173 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000174 expectedOutput,
175 qScale,
176 qOffset,
177 layout);
telsoa014fcda012018-03-09 14:13:49 +0000178}
179
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000180template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
181 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000182LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
183 armnn::IWorkloadFactory& workloadFactory,
184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
185 float qScale,
186 int32_t qOffset,
187 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000188 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000189{
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000191
telsoa01c577f2c2018-08-31 09:22:23 +0100192 // Use common single-batch 3-channel 16x8 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000193 armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000194 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
195
telsoa01c577f2c2018-08-31 09:22:23 +0100196 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000197 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000198 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
199 QuantizedVector<T>(qScale, qOffset, {
200 1, 1, 1,
201 1, -1, 1,
202 1, 1, 1,
203
204 0, 0, 0,
205 0, 0, 0,
206 0, 0, 0,
207
208 2, 2, 2,
209 2, 2, 2,
210 2, 2, 2,
211
212
213 0, 0, 0,
214 0, 0, 0,
215 0, 0, 0,
216
217 1, 1, 1,
218 1, 1, 1,
219 1, 1, 1,
220
221 0, 0, 0,
222 0, 0, 0,
223 0, 0, 0
224 })));
225
telsoa01c577f2c2018-08-31 09:22:23 +0100226 // Expected output is 1 batch of a 2-channel 14x6 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000227 armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000228 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
229 QuantizedVector<T>(qScale, qOffset, {
230 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
231 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
232 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
233 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
234 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
235 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
236
237 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
238 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
239 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
240 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
241 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
242 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
243 })));
244
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000245 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
246 workloadFactory,
247 memoryManager,
248 input,
249 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100250 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000251 expectedOutput,
252 qScale,
253 qOffset,
254 layout);
telsoa014fcda012018-03-09 14:13:49 +0000255}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000309template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000310LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
313 float qScale,
314 int32_t qOffset,
315 bool biasEnabled,
316 const armnn::DataLayout& dataLayout)
317{
318 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000319 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000320 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
321 {
322 1, 5, 2, 3, 5,
323 8, 7, 3, 6, 3,
324 3, 3, 9, 1, 9,
325 4, 1, 8, 1, 3,
326 6, 8, 1, 9, 2
327 });
328
329 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000330 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000331 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
332 {
333 4, 5, 6,
334 0, 0, 0,
335 3, 2, 1
336 });
337
338 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000339 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000340
341 const std::vector<T> outputData =
342 {
343 23, 33, 24,
344 91, 99, 48,
345 26, 50, 19
346 };
347
348 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
349
350 uint32_t padLeft = 1;
351 uint32_t padTop = 1;
352 uint32_t padRight = 1;
353 uint32_t padBottom = 1;
354 uint32_t strideX = 2;
355 uint32_t strideY = 2;
356
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000357 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
358 workloadFactory,
359 memoryManager,
360 input,
361 kernel,
362 boost::multi_array<T, 1>(),
363 expectedOutput,
364 dataLayout,
365 qScale,
366 qOffset,
367 padLeft,
368 padTop,
369 padRight,
370 padBottom,
371 strideX,
372 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000373}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000464template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
465 typename T = armnn::ResolveType<ArmnnType>>
telsoa014fcda012018-03-09 14:13:49 +0000466LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
467 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000469 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000470 float qScale,
471 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000472{
telsoa01c577f2c2018-08-31 09:22:23 +0100473 // Use a single-batch 1-channel 3x3 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000474 armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000475 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
476 QuantizedVector<T>(qScale, qOffset, {
477 11,21,31,
478 12,22,32,
479 13,23,33
480 })));
481
telsoa01c577f2c2018-08-31 09:22:23 +0100482 // Use 1 batch of a 1-channel 2x2 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000483 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000484 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
485 QuantizedVector<T>(qScale, qOffset, {
486 -11,-21,
487 -12,-22,
488 })));
489
telsoa01c577f2c2018-08-31 09:22:23 +0100490// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000491// Manually calculated like this:
492//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
493//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
494//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
495//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
496//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
497//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
498//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000499 armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000500 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
501 QuantizedVector<T>(qScale, qOffset, {
502 0, 0, 0, 0, 0, 0,
503 -242, -594, -934, -372, 0, 0,
504 -495, -1190, -1850, -725, 0, 0,
505 -538, -1256, -1916, -748, 0, 0,
506 -273, -626, -946, -363, 0, 0,
507 0, 0, 0, 0, 0, 0,
508 0, 0, 0, 0, 0, 0,
509 0, 0, 0, 0, 0, 0
510 })));
511
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000512 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
513 workloadFactory,
514 memoryManager,
515 input,
516 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100517 GetBias2<ArmnnBType>(false, qScale * qScale),
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000518 expectedOutput,
519 qScale,
520 qOffset,
521 layout,
522 1, // Padding left.
523 2, // Padding top.
524 3, // Padding right.
525 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000526}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
Teresa Charlinedeeb162019-06-14 11:09:19 +0100586LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
587 armnn::IWorkloadFactory& workloadFactory,
588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
589 armnn::DataLayout layout)
590{
591 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
592 workloadFactory, memoryManager, layout, 0.0f, 0);
593}
594
595LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
596 armnn::IWorkloadFactory& workloadFactory,
597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
598 armnn::DataLayout layout)
599{
600 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
601 <armnn::DataType::Float32, armnn::DataType::Float32>(
602 workloadFactory, memoryManager, layout, 0.0f, 0);
603}
604
605LayerTestResult<float, 4> Convolution1dTest(
606 armnn::IWorkloadFactory& workloadFactory,
607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
608 bool biasEnabled)
609{
610 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
611 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
612}
613
614LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
615 armnn::IWorkloadFactory& workloadFactory,
616 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
617 bool biasEnabled)
618{
619 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
620 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
621}
622
623LayerTestResult<float,4> CompareConvolution2dTest(
624 armnn::IWorkloadFactory& workloadFactory,
625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
626 armnn::IWorkloadFactory& refWorkloadFactory)
627{
628 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
629 workloadFactory, memoryManager, refWorkloadFactory);
630}
631
632template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
633LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
634 armnn::IWorkloadFactory& workloadFactory,
635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
636 const std::vector<float>& inputNoQuantizedValues,
637 armnn::TensorInfo& inputTensorInfo,
638 const std::vector<float>& kernelNoQuantizedValues,
639 armnn::TensorInfo& kernelTensorInfo,
640 const std::vector<float>& outputExpectedNoQuantizedValues,
641 armnn::TensorInfo& outputTensorInfo,
642 uint32_t dilationX,
643 uint32_t dilationY,
644 armnn::DataLayout layout = armnn::DataLayout::NCHW,
645 bool biasEnabled = false
646)
647{
648 float qScale;
649 int32_t qOffset;
650 switch (ArmnnType)
651 {
652 case armnn::DataType::QuantisedAsymm8:
653 {
654 qScale = 0.1f;
655 qOffset = 128;
656 break;
657 }
658 case armnn::DataType::QuantisedSymm16:
659 {
660 qScale = 0.1f;
661 qOffset = 0;
662 break;
663 }
664 case armnn::DataType::Float32:
665 default:
666 {
667 qScale = 0.f;
668 qOffset = 0;
669 break;
670 }
671 }
672
673 inputTensorInfo.SetQuantizationScale(qScale);
674 inputTensorInfo.SetQuantizationOffset(qOffset);
675 kernelTensorInfo.SetQuantizationScale(qScale);
676 kernelTensorInfo.SetQuantizationOffset(qOffset);
677 outputTensorInfo.SetQuantizationScale(qScale);
678 outputTensorInfo.SetQuantizationOffset(qOffset);
679
680 auto input = MakeTensor<T, 4>(inputTensorInfo,
681 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
682 inputTensorInfo.GetQuantizationOffset(),
683 inputNoQuantizedValues)));
684 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
685 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
686 kernelTensorInfo.GetQuantizationOffset(),
687 kernelNoQuantizedValues)));
688 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
689 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
690 outputTensorInfo.GetQuantizationOffset(),
691 outputExpectedNoQuantizedValues)));
692
693 uint32_t padLeft = 0;
694 uint32_t padTop = 0;
695 uint32_t padRight = 0;
696 uint32_t padBottom = 0;
697 uint32_t strideX = 1;
698 uint32_t strideY = 1;
699
700 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
701 workloadFactory,
702 memoryManager,
703 input,
704 kernel,
705 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
706 expectedOutput,
707 qScale,
708 qOffset,
709 layout,
710 padLeft,
711 padTop,
712 padRight,
713 padBottom,
714 strideX,
715 strideY,
716 dilationX,
717 dilationY);
718}
719
// Convolution of a 10x10 single-channel input against a 3x3 kernel with 3x3
// dilation. The input is all zeros apart from one 3x3 block of ones, so the
// expected output can be derived by hand.
// Shapes are written in NCHW order; `layout` is forwarded to the common helper.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single batch, single channel: zeros with a 3x3 block of ones at rows/cols 3-5.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    // The common helper applies quantization (per ArmnnType) and runs the workload.
    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
775
// As Convolution2d3x3Dilation3x3Test, but with a 2-channel input and a kernel
// with 2 input channels producing a single output channel. The two per-channel
// contributions are summed, so every expected value is double the 1-channel case.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Two identical 10x10 channels, each zeros with a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Same 3x3 weights repeated for each of the 2 input channels.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    // The common helper applies quantization (per ArmnnType) and runs the workload.
    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
846
// Explicit template instantiations of the dilated convolution tests for every
// data-type combination exercised by the backend unit tests:
// Float32 (Float32 bias) and the quantized types (Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
888
// Depthwise convolution with asymmetric padding (1 left/top, 2 right/bottom),
// unit strides and depth multiplier 1: 1 batch, 2 channels, 5x5 input, 4x4 kernel.
// Shapes are written in NCHW order; `layout` is forwarded to the test impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias is scaled by qScale * qScale, matching the other depthwise tests in
    // this file. NOTE(review): presumably input-scale * weight-scale — confirm
    // against GetBias2's contract.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
969
// NHWC variant of the asymmetric-padding depthwise test: same input, kernel,
// padding and expected output values as DepthwiseConvolution2dAsymmetricTestCommon,
// but the workload is run with DataLayout::NHWC. Shapes are still written in
// channel-first order here; the test impl handles the layout.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    // Single batch, 2 channels, 5x5.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // 2-channel 4x4 kernel (depth multiplier 1).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected values are identical to the NCHW asymmetric test above.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1047
Bruno Goncalves22972f02019-04-26 21:03:24 -03001048template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1049 typename T = armnn::ResolveType<ArmnnType>>
1050LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1051 armnn::IWorkloadFactory& workloadFactory,
1052 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1053 float qScale,
1054 int32_t qOffset,
1055 bool biasEnabled)
1056{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001057 auto layout = armnn::DataLayout::NHWC;
1058
1059 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001060 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001061 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1062 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001063 0, 0, 0, 0, 0, 0, 0, 0, 0,
1064 0, 0, 0, 0, 0, 0, 0, 0, 0,
1065 0, 0, 0, 0, 0, 0, 0, 0, 0,
1066 0, 0, 0, 1, 1, 1, 0, 0, 0,
1067 0, 0, 0, 1, 1, 1, 0, 0, 0,
1068 0, 0, 0, 1, 1, 1, 0, 0, 0,
1069 0, 0, 0, 0, 0, 0, 0, 0, 0,
1070 0, 0, 0, 0, 0, 0, 0, 0, 0,
1071 0, 0, 0, 0, 0, 0, 0, 0, 0
1072 })));
1073
1074 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1075 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001076 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1077 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001078 1, 2, 3,
1079 4, 5, 6,
1080 7, 8, 9
1081 })));
1082
1083 uint32_t padLeft = 0;
1084 uint32_t padTop = 0;
1085 uint32_t padRight = 0;
1086 uint32_t padBottom = 0;
1087 uint32_t strideX = 1;
1088 uint32_t strideY = 1;
1089 uint32_t dilationX = 3;
1090 uint32_t dilationY = 3;
1091
1092 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Teresa Charlin20b1f882019-06-19 09:34:37 +01001093 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001094 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001095 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1096 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001097 5, 5, 5,
1098 5, 5, 5,
1099 5, 5, 5
1100 })));
1101
Teresa Charlin20b1f882019-06-19 09:34:37 +01001102 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Bruno Goncalves22972f02019-04-26 21:03:24 -03001103 workloadFactory,
1104 memoryManager,
1105 input,
1106 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001107 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001108 expectedOutput,
1109 qScale,
1110 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001111 layout,
Bruno Goncalves22972f02019-04-26 21:03:24 -03001112 padLeft,
1113 padTop,
1114 padRight,
1115 padBottom,
1116 strideX,
1117 strideY,
1118 dilationX,
1119 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001120}
1121
Teresa Charlin20b1f882019-06-19 09:34:37 +01001122
1123template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1124LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1125 armnn::IWorkloadFactory& workloadFactory,
1126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1127 const std::vector<float>& inputNoQuantizedValues,
1128 armnn::TensorInfo& inputTensorInfo,
1129 const std::vector<float>& kernelNoQuantizedValues,
1130 armnn::TensorInfo& kernelTensorInfo,
1131 const std::vector<float>& outputExpectedNoQuantizedValues,
1132 armnn::TensorInfo& outputTensorInfo,
1133 uint32_t dilationX,
1134 uint32_t dilationY,
1135 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1136 bool biasEnabled = false)
1137{
1138 float qScale;
1139 int32_t qOffset;
1140 switch (ArmnnType)
1141 {
1142 case armnn::DataType::QuantisedAsymm8:
1143 {
1144 qScale = 0.1f;
1145 qOffset = 128;
1146 break;
1147 }
1148 case armnn::DataType::QuantisedSymm16:
1149 {
1150 qScale = 0.1f;
1151 qOffset = 0;
1152 break;
1153 }
1154 case armnn::DataType::Float32:
1155 default:
1156 {
1157 qScale = 0.f;
1158 qOffset = 0;
1159 break;
1160 }
1161 }
1162
1163 inputTensorInfo.SetQuantizationScale(qScale);
1164 inputTensorInfo.SetQuantizationOffset(qOffset);
1165 kernelTensorInfo.SetQuantizationScale(qScale);
1166 kernelTensorInfo.SetQuantizationOffset(qOffset);
1167 outputTensorInfo.SetQuantizationScale(qScale);
1168 outputTensorInfo.SetQuantizationOffset(qOffset);
1169
1170 auto input = MakeTensor<T, 4>(inputTensorInfo,
1171 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1172 inputTensorInfo.GetQuantizationOffset(),
1173 inputNoQuantizedValues)));
1174 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1175 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1176 kernelTensorInfo.GetQuantizationOffset(),
1177 kernelNoQuantizedValues)));
1178 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1179 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1180 outputTensorInfo.GetQuantizationOffset(),
1181 outputExpectedNoQuantizedValues)));
1182
1183 uint32_t padLeft = 0;
1184 uint32_t padTop = 0;
1185 uint32_t padRight = 0;
1186 uint32_t padBottom = 0;
1187 uint32_t strideX = 1;
1188 uint32_t strideY = 1;
1189
1190 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1191 workloadFactory,
1192 memoryManager,
1193 input,
1194 kernel,
1195 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1196 expectedOutput,
1197 qScale,
1198 qOffset,
1199 layout,
1200 padLeft,
1201 padTop,
1202 padRight,
1203 padBottom,
1204 strideX,
1205 strideY,
1206 dilationX,
1207 dilationY);
1208}
1209
// Depthwise counterpart of Convolution2d3x3Dilation3x3Test: 10x10 single-channel
// input containing a 3x3 block of ones, 3x3 kernel dilated by 3.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single batch, single channel: zeros with a 3x3 block of ones at rows/cols 3-5.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    // The common helper applies quantization (per ArmnnType) and runs the workload.
    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1265
// 2-channel depthwise dilation test. Unlike the regular convolution variant,
// depthwise processes each channel independently, so the output keeps both
// channels and each one matches the single-channel expected values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Two identical 10x10 channels, each zeros with a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Same 3x3 weights repeated for each channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    // The common helper applies quantization (per ArmnnType) and runs the workload.
    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1341
1342
// Explicit template instantiations of the dilated depthwise convolution tests
// for every data-type combination exercised by the backend unit tests:
// Float32 (Float32 bias) and the quantized types (Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001385LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1386 armnn::IWorkloadFactory& workloadFactory,
1387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001391 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001392 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001393}
1394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001395LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1396 armnn::IWorkloadFactory& workloadFactory,
1397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1398 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001399{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001400 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1401 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001402}
1403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001404LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1405 armnn::IWorkloadFactory& workloadFactory,
1406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1407 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001408 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001410 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001411 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001412}
1413
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001414LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1415 armnn::IWorkloadFactory& workloadFactory,
1416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1417 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001418 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001419{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001420 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001421 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001422}
1423
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001424LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1425 armnn::IWorkloadFactory& workloadFactory,
1426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1427 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001428 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001429{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001430 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001431 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001432}
1433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001434LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1435 armnn::IWorkloadFactory& workloadFactory,
1436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001440 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001442}
1443
Bruno Goncalves22972f02019-04-26 21:03:24 -03001444LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1445 armnn::IWorkloadFactory& workloadFactory,
1446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1447{
1448 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001449 workloadFactory,
1450 memoryManager,
1451 0.f,
1452 0,
1453 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001454}
1455
Ruomei Yan88d44b82019-05-23 14:29:06 +01001456LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1457 armnn::IWorkloadFactory& workloadFactory,
1458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1459 bool biasEnabled,
1460 const armnn::DataLayout layout)
1461{
1462 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1463 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1464}
1465
1466LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1467 armnn::IWorkloadFactory& workloadFactory,
1468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1469 bool biasEnabled,
1470 const armnn::DataLayout layout)
1471{
1472 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1473 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1474}
1475
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001476LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001477 armnn::IWorkloadFactory& workloadFactory,
1478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1479 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001480 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001481{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001482 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1483 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001484}
1485
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001486LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 armnn::IWorkloadFactory& refWorkloadFactory,
1490 const armnn::DataLayout layout)
1491{
1492 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1493 workloadFactory, memoryManager, refWorkloadFactory, layout);
1494}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001495
1496LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1497 armnn::IWorkloadFactory& workloadFactory,
1498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001499{
1500 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1501 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001502 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001503}
1504
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001505LayerTestResult<float,4> SimpleNormalizationWithinTest(
1506 armnn::IWorkloadFactory& workloadFactory,
1507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001508{
1509 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1510 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001511 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001512}
1513
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001514LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1515 armnn::IWorkloadFactory& workloadFactory,
1516 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001517{
1518 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1519 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001520 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001521}
1522
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001523LayerTestResult<float,2> SimpleSoftmaxTest(
1524 armnn::IWorkloadFactory& workloadFactory,
1525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1526 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001527{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001528 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001529}
1530
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001531LayerTestResult<float,3> Simple3dSoftmaxTest(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1534 float beta)
1535{
1536 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1537}
1538
// 4D softmax test instantiated for Float32; 'beta' is forwarded unchanged.
LayerTestResult<float,4> Simple4dSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
1546
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001547LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1548 armnn::IWorkloadFactory& workloadFactory,
1549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1550 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001551{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001552 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001553}
1554
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001555LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1556 armnn::IWorkloadFactory& workloadFactory,
1557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1558 float beta)
1559{
1560 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1561}
1562
// 4D softmax test instantiated for QAsymm8; 'beta' is forwarded unchanged.
LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
1570
nikraj01248683f2019-05-29 16:46:50 +01001571LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1574 float beta)
1575{
1576 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1577}
1578
// 3D softmax test instantiated for QSymm16.
// NOTE(review): "Uint16" in the name, but the element type is int16_t (QSymm16).
LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
}
1586
// 4D softmax test instantiated for QSymm16.
// NOTE(review): "Uint16" in the name, but the element type is int16_t (QSymm16).
LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta)
{
    return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
}
1594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001595LayerTestResult<float,4> CompareNormalizationTest(
1596 armnn::IWorkloadFactory& workloadFactory,
1597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1598 armnn::IWorkloadFactory& refWorkloadFactory,
1599 armnn::NormalizationAlgorithmChannel normChannel,
1600 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001601{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001602 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001603}
1604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001605LayerTestResult<float,2> CompareSoftmaxTest(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001608 armnn::IWorkloadFactory& refWorkloadFactory,
1609 float beta)
1610{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001611 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1612 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001613}
1614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001615LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001618 armnn::IWorkloadFactory& refWorkloadFactory,
1619 float beta)
1620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001621 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1622 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001623}
1624
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001625std::vector<LayerTestResult<float,3>> SplitterTest(
1626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001629 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001630}
1631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001632std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1633 armnn::IWorkloadFactory& workloadFactory,
1634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001636 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001637}
1638
Ruomei Yan25339c32019-05-28 16:48:20 +01001639std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1640 armnn::IWorkloadFactory& workloadFactory,
1641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1642{
1643 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1644}
1645
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001646LayerTestResult<float, 3> CopyViaSplitterTest(
1647 armnn::IWorkloadFactory& workloadFactory,
1648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001650 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001651}
1652
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001653LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1654 armnn::IWorkloadFactory& workloadFactory,
1655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001656{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001657 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001658}
1659
Ruomei Yan25339c32019-05-28 16:48:20 +01001660LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1661 armnn::IWorkloadFactory& workloadFactory,
1662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1663{
1664 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1665}
1666
telsoa01c577f2c2018-08-31 09:22:23 +01001667LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001668 armnn::IWorkloadFactory& workloadFactory,
1669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001670{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001671 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001672 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1673 { 2., 3., 3., 4. }));
1674
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001675 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001676 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1677 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1678 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001679 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001680 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001681}
1682
1683LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01001684 armnn::IWorkloadFactory& workloadFactory,
1685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001686{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001687 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001688 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1689 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1690 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
1691
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001692 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001693 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1694 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1695 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1696 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1697 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1698 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1699 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
1700 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001701 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
1702 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001703}
1704
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001705LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1706 armnn::IWorkloadFactory& workloadFactory,
1707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001708{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001709 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001710 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1711 {2., 3., 3., 4.}));
1712
1713
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001714 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001715 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1716 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1717 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1718
Conor Kennedyb9971c92019-05-07 07:14:23 +01001719 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001720 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001721}
1722
Conor Kennedyb9971c92019-05-07 07:14:23 +01001723LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1724 armnn::IWorkloadFactory& workloadFactory,
1725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1726{
1727 const float qScale = 1.0f;
1728 const int32_t qOffset = 0;
1729
1730 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1731 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1732
1733 armnn::TensorInfo inputDesc({2, 2}, datatype);
1734 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1735 std::vector<float>{2., 3., 3., 4.}));
1736
1737 armnn::TensorInfo outputDesc({2, 4}, datatype);
1738 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1739 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1740 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1741
1742 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1743 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1744
1745}
1746
1747LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1748 armnn::IWorkloadFactory& workloadFactory,
1749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1750{
1751 const float qScale = 1.0f;
1752 const int32_t qOffset = 0;
1753
1754 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1755 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1756
1757 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
1758 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1759 std::vector<float>({ 2., 3., 3., 4. })));
1760
1761 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
1762 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1763 qOffset, std::vector<float>(
1764 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1765 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
1766
1767 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
1768 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1769}
1770
1771LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
1772 armnn::IWorkloadFactory& workloadFactory,
1773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1774{
1775 const float qScale = 2.0f;
1776 const int32_t qOffset = 0;
1777
1778 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1779 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1780
1781 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
1782 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1783 qOffset, std::vector<float>(
1784 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1785 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
1786
1787 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
1788 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1789 qOffset, std::vector<float>(
1790 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1791 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1792 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1793 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1794 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1795 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
1796
1797 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
1798 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1799}
1800
1801LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1802 armnn::IWorkloadFactory& workloadFactory,
1803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1804{
1805 const float qScale = 1.0f;
1806 const int32_t qOffset = 0;
1807
1808 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1809
1810 armnn::TensorInfo inputDesc({2, 2}, datatype);
1811 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1812 qOffset, std::vector<float>{2., 3., 3., 4.}));
1813
1814 armnn::TensorInfo outputDesc({2, 4}, datatype);
1815 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1816 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1817 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1818
1819 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1820 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1821}
1822
Jim Flynn4ed6c832019-05-20 11:02:46 +01001823LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001824 armnn::IWorkloadFactory& workloadFactory,
1825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001826{
surmeh013537c2c2018-05-18 16:31:43 +01001827 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001828 unsigned int outputHeight = 6;
1829 unsigned int outputChannels = 3;
1830
surmeh013537c2c2018-05-18 16:31:43 +01001831 unsigned int inputWidth1 = 3;
1832 unsigned int inputHeight1 = 6;
1833 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001834
surmeh013537c2c2018-05-18 16:31:43 +01001835 unsigned int inputWidth2 = 3;
1836 unsigned int inputHeight2 = 6;
1837 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001838
telsoa01c577f2c2018-08-31 09:22:23 +01001839 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001840 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1841 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1842 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001843
1844 LayerTestResult<float,3> ret(outputTensorInfo);
1845
telsoa014fcda012018-03-09 14:13:49 +00001846 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001847 {
1848 1.0f, 2.0f, 3.0f,
1849 4.0f, 5.0f, 6.0f,
1850 7.0f, 8.0f, 9.0f,
1851 10.0f, 11.0f, 12.0f,
1852 13.0f, 14.0f, 15.0f,
1853 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001854
surmeh013537c2c2018-05-18 16:31:43 +01001855 19.0f, 20.0f, 21.0f,
1856 22.0f, 23.0f, 24.0f,
1857 25.0f, 26.0f, 27.0f,
1858 28.0f, 29.0f, 30.0f,
1859 31.0f, 32.0f, 33.0f,
1860 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001861
surmeh013537c2c2018-05-18 16:31:43 +01001862 37.0f, 38.0f, 39.0f,
1863 40.0f, 41.0f, 42.0f,
1864 43.0f, 44.0f, 45.0f,
1865 46.0f, 47.0f, 48.0f,
1866 49.0f, 50.0f, 51.0f,
1867 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001868 })
1869 );
1870
telsoa014fcda012018-03-09 14:13:49 +00001871 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1872 {
surmeh013537c2c2018-05-18 16:31:43 +01001873 1.0f, 2.0f, 3.0f,
1874 4.0f, 5.0f, 6.0f,
1875 7.0f, 8.0f, 9.0f,
1876 10.0f, 11.0f, 12.0f,
1877 13.0f, 14.0f, 15.0f,
1878 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001879
surmeh013537c2c2018-05-18 16:31:43 +01001880 19.0f, 20.0f, 21.0f,
1881 22.0f, 23.0f, 24.0f,
1882 25.0f, 26.0f, 27.0f,
1883 28.0f, 29.0f, 30.0f,
1884 31.0f, 32.0f, 33.0f,
1885 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001886 })
1887 );
1888
1889 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1890 {
surmeh013537c2c2018-05-18 16:31:43 +01001891 37.0f, 38.0f, 39.0f,
1892 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001893 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001894 46.0f, 47.0f, 48.0f,
1895 49.0f, 50.0f, 51.0f,
1896 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001897 })
1898 );
1899
telsoa01c577f2c2018-08-31 09:22:23 +01001900 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01001901 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00001902
telsoa01c577f2c2018-08-31 09:22:23 +01001903 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01001904 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00001905
telsoa014fcda012018-03-09 14:13:49 +00001906 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1907
1908 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1909
1910 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1911 subTensorsSupported ?
1912 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1913 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1914
1915 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1916 subTensorsSupported ?
1917 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1918 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1919
Jim Flynne242f2d2019-05-22 14:24:13 +01001920 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00001921 armnn::WorkloadInfo info;
1922 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1923 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001924 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1925
1926 data.m_ViewOrigins.push_back(window1);
1927 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001928
Jim Flynn4ed6c832019-05-20 11:02:46 +01001929 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00001930
1931 inputHandle1->Allocate();
1932 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001933 outputHandle->Allocate();
1934
1935 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1936 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001937
Derek Lambertif30f7d32019-04-09 10:25:02 +01001938 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001939 workload->Execute();
1940
1941 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1942
1943 return ret;
1944}
1945
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001946LayerTestResult<float,4> AdditionTest(
1947 armnn::IWorkloadFactory& workloadFactory,
1948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001949{
1950 unsigned int batchSize = 2;
1951 unsigned int channels = 2;
1952 unsigned int height = 2;
1953 unsigned int width = 3;
1954
1955 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1956 armnn::TensorInfo outputTensorInfo;
1957
1958 unsigned int shape[] = {batchSize, channels, height, width};
1959
1960 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1961 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1962 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1963
1964
1965 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1966 {
1967 0.0f, 2.0f, 1.0f,
1968 0.2f, 1.0f, 2.0f,
1969
1970 1.0f, 2.0f, 1.0f,
1971 0.2f, 1.0f, 2.0f,
1972
1973 0.0f, 2.0f, 1.0f,
1974 4.2f, 1.0f, 2.0f,
1975
1976 0.0f, 0.0f, 1.0f,
1977 0.2f, 1.0f, 2.0f,
1978 }));
1979
1980 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1981 {
1982 1.0f, 2.0f, 1.0f,
1983 0.0f, 1.0f, 2.0f,
1984
1985 1.0f, 2.0f, -2.0f,
1986 0.2f, 1.0f, 2.0f,
1987
1988 0.0f, 2.0f, 1.0f,
1989 4.2f, 0.0f, -3.0f,
1990
1991 0.0f, 0.0f, 1.0f,
1992 0.7f, 1.0f, 5.0f,
1993 }));
1994
1995 LayerTestResult<float,4> ret(outputTensorInfo);
1996 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1997 {
1998 1.0f, 4.0f, 2.0f,
1999 0.2f, 2.0f, 4.0f,
2000
2001 2.0f, 4.0f, -1.0f,
2002 0.4f, 2.0f, 4.0f,
2003
2004 0.0f, 4.0f, 2.0f,
2005 8.4f, 1.0f, -1.0f,
2006
2007 0.0f, 0.0f, 2.0f,
2008 0.9f, 2.0f, 7.0f,
2009 }));
2010
2011 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2012 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2013 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2014
2015 armnn::AdditionQueueDescriptor data;
2016 armnn::WorkloadInfo info;
2017 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2018 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2019 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2020
2021 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2022
2023 inputHandle1->Allocate();
2024 inputHandle2->Allocate();
2025 outputHandle->Allocate();
2026
2027 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2028 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2029
Derek Lambertif30f7d32019-04-09 10:25:02 +01002030 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002031 workload->Execute();
2032
2033 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2034
2035 return ret;
2036}
2037
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002038template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 float qScale,
2043 int32_t qOffset)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2046 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2047 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002048
2049 if (armnn::IsQuantizedType<T>())
2050 {
2051 inputTensorInfo1.SetQuantizationScale(qScale);
2052 inputTensorInfo1.SetQuantizationOffset(qOffset);
2053 inputTensorInfo2.SetQuantizationScale(qScale);
2054 inputTensorInfo2.SetQuantizationOffset(qOffset);
2055 outputTensorInfo.SetQuantizationScale(qScale);
2056 outputTensorInfo.SetQuantizationOffset(qOffset);
2057 }
2058
2059 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2060 {
2061 0.0f,
2062 1.0f,
2063
2064 2.0f,
2065 3.0f,
2066
2067 4.0f,
2068 5.0f,
2069 }));
2070
2071 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2072 {
2073 0.5f, 1.5f, 2.5f,
2074 3.5f, 4.5f, 5.5f,
2075 }));
2076
2077 LayerTestResult<T,4> ret(outputTensorInfo);
2078 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2079 {
2080 0.5f, 1.5f, 2.5f,
2081 4.5f, 5.5f, 6.5f,
2082
2083 2.5f, 3.5f, 4.5f,
2084 6.5f, 7.5f, 8.5f,
2085
2086 4.5f, 5.5f, 6.5f,
2087 8.5f, 9.5f, 10.5f,
2088 }));
2089
2090 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2091 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2092 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2093
2094 armnn::AdditionQueueDescriptor data;
2095 armnn::WorkloadInfo info;
2096 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2097 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2098 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2099
2100 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2101
2102 inputHandle1->Allocate();
2103 inputHandle2->Allocate();
2104 outputHandle->Allocate();
2105
2106 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2107 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2108
Derek Lambertif30f7d32019-04-09 10:25:02 +01002109 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002110 workload->Execute();
2111
2112 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2113
2114 return ret;
2115}
2116
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002117template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002118LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2119 armnn::IWorkloadFactory& workloadFactory,
2120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002121 float qScale,
2122 int32_t qOffset)
2123{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002124 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2125 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2126 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002127
2128 if (armnn::IsQuantizedType<T>())
2129 {
2130 inputTensorInfo1.SetQuantizationScale(qScale);
2131 inputTensorInfo1.SetQuantizationOffset(qOffset);
2132 inputTensorInfo2.SetQuantizationScale(qScale);
2133 inputTensorInfo2.SetQuantizationOffset(qOffset);
2134 outputTensorInfo.SetQuantizationScale(qScale);
2135 outputTensorInfo.SetQuantizationOffset(qOffset);
2136 }
2137
2138 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2139 {
2140 0.0f, 1.0f, 2.0f,
2141 3.0f, 4.0f, 5.0f,
2142 6.0f, 7.0f, 8.0f,
2143 9.0f, 10.0f, 11.0f,
2144 12.0f, 13.0f, 14.0f,
2145 15.0f, 16.0f, 17.0f,
2146 }));
2147
2148 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2149 {
2150 0.5f,
2151 }));
2152
2153 LayerTestResult<T,4> ret(outputTensorInfo);
2154 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2155 {
2156 0.5f, 1.5f, 2.5f,
2157 3.5f, 4.5f, 5.5f,
2158 6.5f, 7.5f, 8.5f,
2159 9.5f, 10.5f, 11.5f,
2160 12.5f, 13.5f, 14.5f,
2161 15.5f, 16.5f, 17.5f,
2162 }));
2163
2164 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2166 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2167
2168 armnn::AdditionQueueDescriptor data;
2169 armnn::WorkloadInfo info;
2170 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2171 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2172 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2173
2174 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2175
2176 inputHandle1->Allocate();
2177 inputHandle2->Allocate();
2178 outputHandle->Allocate();
2179
2180 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2181 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2182
Derek Lambertif30f7d32019-04-09 10:25:02 +01002183 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002184 workload->Execute();
2185
2186 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2187
2188 return ret;
2189}
2190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002191LayerTestResult<float, 4> AdditionBroadcastTest(
2192 armnn::IWorkloadFactory& workloadFactory,
2193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002194{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002195 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2196 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002197}
2198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002199LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2200 armnn::IWorkloadFactory& workloadFactory,
2201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002202{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002203 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2204 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002205}
2206
Sadik Armagan2999a022019-04-09 14:20:12 +01002207LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2208 armnn::IWorkloadFactory& workloadFactory,
2209 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2210{
2211 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2212 workloadFactory, memoryManager, 2.f, 0);
2213}
2214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002215LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2216 armnn::IWorkloadFactory& workloadFactory,
2217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002218{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002219 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2220 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002221}
2222
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002223LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2224 armnn::IWorkloadFactory& workloadFactory,
2225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002226{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002227 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2228 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002229}
2230
Sadik Armagan2999a022019-04-09 14:20:12 +01002231LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2232 armnn::IWorkloadFactory& workloadFactory,
2233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2234{
2235 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2236 workloadFactory, memoryManager, 0.1333333f, 0);
2237}
2238
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002239LayerTestResult<float,4> CompareAdditionTest(
2240 armnn::IWorkloadFactory& workloadFactory,
2241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2242 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002243{
2244 unsigned int batchSize = 4;
2245 unsigned int channels = 1;
2246 unsigned int height = 2;
2247 unsigned int width = 3;
2248
2249 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2250 armnn::TensorInfo outputTensorInfo;
2251
2252 unsigned int shape[] = {batchSize, channels, height, width};
2253
2254 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2255 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2256 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2257
2258 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2259 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2260
2261 LayerTestResult<float,4> ret(outputTensorInfo);
2262
2263 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2264 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2265 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2266
2267 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2268 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2269 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2270
2271 armnn::AdditionQueueDescriptor data;
2272 armnn::WorkloadInfo info;
2273 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2274 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2275 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2276
2277 armnn::AdditionQueueDescriptor refData = data;
2278 armnn::WorkloadInfo refInfo = info;
2279 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2280 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2281 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2282
2283 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2284 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2285
2286 inputHandle1->Allocate();
2287 inputHandle2->Allocate();
2288 outputHandle->Allocate();
2289 inputHandle1Ref->Allocate();
2290 inputHandle2Ref->Allocate();
2291 outputHandleRef->Allocate();
2292
2293 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2294 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2295 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2296 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2297
Derek Lambertif30f7d32019-04-09 10:25:02 +01002298 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002299 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002300 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002301 workloadRef->Execute();
2302
2303 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2304 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2305
2306 return ret;
2307}
2308
surmeh01bceff2f2018-03-29 16:29:27 +01002309namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002310template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002311LayerTestResult<T, 4> DivisionTestHelper(
2312 armnn::IWorkloadFactory& workloadFactory,
2313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2314 const unsigned int shape0[4],
2315 const std::vector<T>& values0,
2316 float scale0,
2317 int32_t offset0,
2318 const unsigned int shape1[4],
2319 const std::vector<T> & values1,
2320 float scale1,
2321 int32_t offset1,
2322 const unsigned int outShape[4],
2323 const std::vector<T> & outValues,
2324 float outScale,
2325 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002326{
Sadik Armagan2999a022019-04-09 14:20:12 +01002327 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2328 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2329 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002330
David Beck5cd01f32018-09-12 16:00:08 +01002331 inputTensorInfo0.SetQuantizationScale(scale0);
2332 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002333
David Beck5cd01f32018-09-12 16:00:08 +01002334 inputTensorInfo1.SetQuantizationScale(scale1);
2335 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002336
David Beck5cd01f32018-09-12 16:00:08 +01002337 outputTensorInfo.SetQuantizationScale(outScale);
2338 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002339
David Beck5cd01f32018-09-12 16:00:08 +01002340 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2341 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002342
David Beck5cd01f32018-09-12 16:00:08 +01002343 LayerTestResult<T, 4> result(outputTensorInfo);
2344 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002345
David Beck5cd01f32018-09-12 16:00:08 +01002346 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2347 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2348 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002349
David Beck5cd01f32018-09-12 16:00:08 +01002350 armnn::DivisionQueueDescriptor data;
2351 armnn::WorkloadInfo info;
2352 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2353 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2354 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002355
David Beck5cd01f32018-09-12 16:00:08 +01002356 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002357
David Beck5cd01f32018-09-12 16:00:08 +01002358 inputHandle0->Allocate();
2359 inputHandle1->Allocate();
2360 outputHandle->Allocate();
2361
2362 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2363 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2364
Derek Lambertif30f7d32019-04-09 10:25:02 +01002365 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002366 workload->Execute();
2367
2368 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2369
2370 return result;
2371}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002372} // anonymous namespace
2373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002374LayerTestResult<float,4> DivisionByZeroTest(
2375 armnn::IWorkloadFactory& workloadFactory,
2376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002377{
2378 const unsigned int width = 2;
2379 const unsigned int height = 2;
2380 const unsigned int channelCount = 2;
2381 const unsigned int batchSize = 2;
2382
2383 unsigned int shape[] = { batchSize, channelCount, height, width };
2384
2385 std::vector<float> input0({
2386 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2387 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2388
2389 std::vector<float> input1({
2390 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2391 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2392
2393 std::vector<float> output({
2394 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2395 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2396
Sadik Armagan2999a022019-04-09 14:20:12 +01002397 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2398 memoryManager,
2399 shape, input0, 1.0f, 0,
2400 shape, input1, 1.0f, 0,
2401 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002402}
2403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002404LayerTestResult<float,4> DivisionTest(
2405 armnn::IWorkloadFactory& workloadFactory,
2406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002407{
2408 const unsigned int width = 2;
2409 const unsigned int height = 2;
2410 const unsigned int channelCount = 2;
2411 const unsigned int batchSize = 2;
2412
2413 unsigned int shape[] = { batchSize, channelCount, height, width };
2414
2415 std::vector<float> input0({
2416 2, 2, 2, 2, 3, 3, 3, 3,
2417 4, 4, 4, 4, 5, 5, 5, 5 });
2418
2419 std::vector<float> input1({
2420 1, 1, 1, 1, 2, 2, 2, 2,
2421 4, 4, 4, 4, 4, 4, 4, 4 });
2422
2423 std::vector<float> output({
2424 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2425 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2426
David Beck5cd01f32018-09-12 16:00:08 +01002427
Sadik Armagan2999a022019-04-09 14:20:12 +01002428 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2429 memoryManager,
2430 shape, input0, 1.0f, 0,
2431 shape, input1, 1.0f, 0,
2432 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002433}
2434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002435LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2436 armnn::IWorkloadFactory& workloadFactory,
2437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002438{
2439 unsigned int shape0[] = { 1, 2, 2, 2 };
2440 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2441
2442 unsigned int shape1[] = { 1, 1, 1, 1 };
2443 std::vector<float> input1({ 2 });
2444
2445 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2446
David Beck5cd01f32018-09-12 16:00:08 +01002447
Sadik Armagan2999a022019-04-09 14:20:12 +01002448 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2449 memoryManager,
2450 shape0, input0, 1.0f, 0,
2451 shape1, input1, 1.0f, 0,
2452 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002453}
2454
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002455LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2456 armnn::IWorkloadFactory& workloadFactory,
2457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002458{
2459 unsigned int shape0[] = { 1, 3, 3, 2 };
2460 std::vector<float> input0({
2461 1, 4, 3, 8, 5, 12,
2462 7, 16, 9, 20, 11, 24,
2463 13, 28, 15, 32, 17, 36});
2464
2465 unsigned int shape1[] = { 1, 1, 1, 2 };
2466 std::vector<float> input1({ 1, 2 });
2467
2468 std::vector<float> output({
2469 1, 2, 3, 4, 5, 6,
2470 7, 8, 9, 10, 11, 12,
2471 13, 14, 15, 16, 17, 18});
2472
Sadik Armagan2999a022019-04-09 14:20:12 +01002473 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2474 memoryManager,
2475 shape0, input0, 1.0f, 0,
2476 shape1, input1, 1.0f, 0,
2477 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002478}
2479
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002480LayerTestResult<uint8_t,4> DivisionUint8Test(
2481 armnn::IWorkloadFactory& workloadFactory,
2482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002483{
2484 const unsigned int width = 2;
2485 const unsigned int height = 2;
2486 const unsigned int channelCount = 2;
2487 const unsigned int batchSize = 2;
2488
2489 unsigned int shape[] = { batchSize, channelCount, height, width };
2490
2491 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2492 4, 4, 4, 4, 5, 5, 5, 5 });
2493
2494 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2495 4, 4, 4, 4, 4, 4, 4, 4 });
2496
2497 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2498 4, 4, 4, 4, 5, 5, 5, 5});
2499
2500
Sadik Armagan2999a022019-04-09 14:20:12 +01002501 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2502 memoryManager,
2503 shape, input0, 1.0f, 0,
2504 shape, input1, 1.0f, 0,
2505 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002506}
2507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002508LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2509 armnn::IWorkloadFactory& workloadFactory,
2510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002511{
2512 unsigned int shape0[] = { 1, 2, 2, 2 };
2513 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2514
2515 unsigned int shape1[] = { 1, 1, 1, 1 };
2516 std::vector<uint8_t> input1({ 2 });
2517
2518 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2519
Sadik Armagan2999a022019-04-09 14:20:12 +01002520 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2521 memoryManager,
2522 shape0, input0, 1.0f, 0,
2523 shape1, input1, 1.0f, 0,
2524 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002525}
2526
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002527LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2528 armnn::IWorkloadFactory& workloadFactory,
2529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002530{
2531 unsigned int shape0[] = { 1, 3, 3, 2 };
2532 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2533 7, 16, 9, 20, 11, 24,
2534 13, 28, 15, 32, 17, 36});
2535
2536 unsigned int shape1[] = { 1, 1, 1, 2 };
2537 std::vector<uint8_t> input1({ 1, 2 });
2538
2539 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2540 7, 8, 9, 10, 11, 12,
2541 13, 14, 15, 16, 17, 18});
2542
Sadik Armagan2999a022019-04-09 14:20:12 +01002543 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2544 memoryManager,
2545 shape0, input0, 1.0f, 0,
2546 shape1, input1, 1.0f, 0,
2547 shape0, output, 1.0f, 0);
2548}
2549
2550LayerTestResult<int16_t,4> DivisionInt16Test(
2551 armnn::IWorkloadFactory& workloadFactory,
2552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2553{
2554 unsigned int shape[] = { 2, 2, 2, 2 };
2555
2556 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2557 4, 4, 4, 4, 5, 5, 5, 5 });
2558
2559 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2560 4, 4, 4, 4, 4, 4, 4, 4 });
2561
2562 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2563 4, 4, 4, 4, 5, 5, 5, 5});
2564
2565
2566 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2567 memoryManager,
2568 shape, input0, 1.0f, 0,
2569 shape, input1, 1.0f, 0,
2570 shape, output, 0.25f, 0);
2571}
2572
2573LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2574 armnn::IWorkloadFactory& workloadFactory,
2575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2576{
2577 unsigned int shape0[] = { 1, 2, 2, 2 };
2578 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2579
2580 unsigned int shape1[] = { 1, 1, 1, 1 };
2581 std::vector<int16_t> input1({ 2 });
2582
2583 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2584
2585 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2586 memoryManager,
2587 shape0, input0, 1.0f, 0,
2588 shape1, input1, 1.0f, 0,
2589 shape0, output, 1.0f, 0);
2590}
2591
2592LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2593 armnn::IWorkloadFactory& workloadFactory,
2594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2595{
2596 unsigned int shape0[] = { 1, 3, 3, 2 };
2597 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2598 7, 16, 9, 20, 11, 24,
2599 13, 28, 15, 32, 17, 36});
2600
2601 unsigned int shape1[] = { 1, 1, 1, 2 };
2602 std::vector<int16_t> input1({ 1, 2 });
2603
2604 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2605 7, 8, 9, 10, 11, 12,
2606 13, 14, 15, 16, 17, 18});
2607
2608 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2609 memoryManager,
2610 shape0, input0, 1.0f, 0,
2611 shape1, input1, 1.0f, 0,
2612 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002613}
2614
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002615template<typename DescriptorType>
2616std::unique_ptr<armnn::IWorkload> CreateWorkload(
2617 const armnn::IWorkloadFactory& workloadFactory,
2618 const armnn::WorkloadInfo& info,
2619 const DescriptorType& descriptor)
2620{
2621 return CreateWorkload(workloadFactory, info, descriptor);
2622};
2623
2624template<>
2625std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2626 const armnn::IWorkloadFactory& workloadFactory,
2627 const armnn::WorkloadInfo& info,
2628 const armnn::MaximumQueueDescriptor& descriptor)
2629{
2630 return workloadFactory.CreateMaximum(descriptor, info);
2631}
2632
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002633template<>
2634std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2635 const armnn::IWorkloadFactory& workloadFactory,
2636 const armnn::WorkloadInfo& info,
2637 const armnn::MinimumQueueDescriptor& descriptor)
2638{
2639 return workloadFactory.CreateMinimum(descriptor, info);
2640}
2641
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002642template<>
2643std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2644 const armnn::IWorkloadFactory& workloadFactory,
2645 const armnn::WorkloadInfo& info,
2646 const armnn::EqualQueueDescriptor& descriptor)
2647{
2648 return workloadFactory.CreateEqual(descriptor, info);
2649}
2650
FrancisMurtagh878f0232018-12-19 10:56:15 +00002651template<>
2652std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2653 const armnn::IWorkloadFactory& workloadFactory,
2654 const armnn::WorkloadInfo& info,
2655 const armnn::GreaterQueueDescriptor& descriptor)
2656{
2657 return workloadFactory.CreateGreater(descriptor, info);
2658}
2659
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002660namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00002661
2662template <typename Descriptor,
2663 armnn::DataType ArmnnTypeInput,
2664 armnn::DataType ArmnnTypeOutput,
2665 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
2666 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
2667LayerTestResult<TOutput, 4> ElementwiseTestHelper(
2668 armnn::IWorkloadFactory & workloadFactory,
2669 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2670 const unsigned int shape0[4], std::vector<TInput> values0,
2671 const unsigned int shape1[4], std::vector<TInput> values1,
2672 const unsigned int outShape[4], std::vector<TOutput> outValues,
2673 float qScale = 0.0f, int qOffset = 0)
2674{
2675 const size_t dimensionCount = 4;
2676 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
2677 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
2678 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
2679
2680 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
2681 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
2682
2683 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002684 {
kevmay012b4d88e2019-01-24 14:05:09 +00002685 inputTensorInfo0.SetQuantizationScale(qScale);
2686 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002687
kevmay012b4d88e2019-01-24 14:05:09 +00002688 inputTensorInfo1.SetQuantizationScale(qScale);
2689 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002690
kevmay012b4d88e2019-01-24 14:05:09 +00002691 outputTensorInfo.SetQuantizationScale(qScale);
2692 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002693 }
kevmay012b4d88e2019-01-24 14:05:09 +00002694
2695 LayerTestResult<TOutput,4> ret(outputTensorInfo);
2696
2697 if(ArmnnTypeOutput == armnn::DataType::Boolean)
2698 {
2699 ret.compareBoolean = true;
2700 }
2701
2702 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2703 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2704 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2705
2706 Descriptor data;
2707 armnn::WorkloadInfo info;
2708 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2709 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2710 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2711 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
2712
2713 inputHandle0->Allocate();
2714 inputHandle1->Allocate();
2715 outputHandle->Allocate();
2716
2717 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2718 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2719
Derek Lambertif30f7d32019-04-09 10:25:02 +01002720 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00002721 ExecuteWorkload(*workload, memoryManager);
2722
2723 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2724
2725 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
2726 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002727}
2728
kevmay012b4d88e2019-01-24 14:05:09 +00002729template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
2730LayerTestResult<T, 4> ElementwiseTestHelper(
2731 armnn::IWorkloadFactory & workloadFactory,
2732 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
2733 const unsigned int shape0[4], std::vector<T> values0,
2734 const unsigned int shape1[4], std::vector<T> values1,
2735 const unsigned int outShape[4], std::vector<T> outValues,
2736 float qScale = 0.0f, int qOffset = 0)
2737{
2738 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
2739 (workloadFactory,
2740 memoryManager,
2741 shape0,
2742 values0,
2743 shape1,
2744 values1,
2745 outShape,
2746 outValues,
2747 qScale,
2748 qOffset);
2749}
2750}
2751
2752LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002754{
2755 const unsigned int width = 2;
2756 const unsigned int height = 2;
2757 const unsigned int channelCount = 2;
2758 const unsigned int batchSize = 2;
2759
2760 unsigned int shape[] = { batchSize, channelCount, height, width };
2761
2762 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2763 3, 3, 3, 3, 4, 4, 4, 4 });
2764
2765 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2766 5, 5, 5, 5, 4, 4, 4, 4 });
2767
kevmay012b4d88e2019-01-24 14:05:09 +00002768 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2769 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002770
kevmay012b4d88e2019-01-24 14:05:09 +00002771 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002772 workloadFactory,
2773 memoryManager,
2774 shape,
2775 input0,
2776 shape,
2777 input1,
2778 shape,
2779 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002780}
2781
kevmay012b4d88e2019-01-24 14:05:09 +00002782LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002783 armnn::IWorkloadFactory& workloadFactory,
2784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2785{
2786 unsigned int shape0[] = { 1, 2, 2, 2 };
2787 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2788
2789 unsigned int shape1[] = { 1, 1, 1, 1 };
2790 std::vector<float> input1({ 1 });
2791
kevmay012b4d88e2019-01-24 14:05:09 +00002792 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002793
kevmay012b4d88e2019-01-24 14:05:09 +00002794 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002795 workloadFactory,
2796 memoryManager,
2797 shape0,
2798 input0,
2799 shape1,
2800 input1,
2801 shape0,
2802 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002803}
2804
kevmay012b4d88e2019-01-24 14:05:09 +00002805LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002806 armnn::IWorkloadFactory& workloadFactory,
2807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2808{
2809 const unsigned int shape0[] = { 1, 2, 2, 3 };
2810 const unsigned int shape1[] = { 1, 1, 1, 3 };
2811
2812 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2813 7, 8, 9, 10, 11, 12 });
2814
2815 std::vector<float> input1({ 1, 2, 3});
2816
kevmay012b4d88e2019-01-24 14:05:09 +00002817 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2818 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002819
kevmay012b4d88e2019-01-24 14:05:09 +00002820 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002821 workloadFactory,
2822 memoryManager,
2823 shape0,
2824 input0,
2825 shape1,
2826 input1,
2827 shape0,
2828 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002829}
2830
2831LayerTestResult<uint8_t, 4> EqualUint8Test(
2832 armnn::IWorkloadFactory& workloadFactory,
2833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2834{
2835 unsigned int shape[] = { 2, 2, 2, 2 };
2836
2837 // See dequantized values to the right.
2838 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002839 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002840
2841 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2842 3, 3, 3, 3, 5, 5, 5, 5 });
2843
2844 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2845 1, 1, 1, 1, 0, 0, 0, 0 });
2846
kevmay012b4d88e2019-01-24 14:05:09 +00002847 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2848 armnn::DataType::QuantisedAsymm8,
2849 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002850 workloadFactory,
2851 memoryManager,
2852 shape,
2853 input0,
2854 shape,
2855 input1,
2856 shape,
2857 output,
2858 1.0f,
2859 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002860}
2861
2862LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2863 armnn::IWorkloadFactory& workloadFactory,
2864 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2865{
2866 const unsigned int shape0[] = { 1, 2, 2, 3 };
2867 const unsigned int shape1[] = { 1, 1, 1, 1 };
2868
2869 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2870 7, 8, 9, 10, 11, 12 });
2871
2872 std::vector<uint8_t> input1({ 1 });
2873
2874 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2875 0, 0, 0, 0, 0, 0 });
2876
kevmay012b4d88e2019-01-24 14:05:09 +00002877 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2878 armnn::DataType::QuantisedAsymm8,
2879 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002880 workloadFactory,
2881 memoryManager,
2882 shape0,
2883 input0,
2884 shape1,
2885 input1,
2886 shape0,
2887 output,
2888 1.0f,
2889 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002890}
2891
2892LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2893 armnn::IWorkloadFactory& workloadFactory,
2894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2895{
2896 const unsigned int shape0[] = { 1, 2, 2, 3 };
2897 const unsigned int shape1[] = { 1, 1, 1, 3 };
2898
2899 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2900 7, 8, 9, 10, 11, 12 });
2901
2902 std::vector<uint8_t> input1({ 1, 1, 3});
2903
2904 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2905 0, 0, 0, 0, 0, 0 });
2906
kevmay012b4d88e2019-01-24 14:05:09 +00002907 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2908 armnn::DataType::QuantisedAsymm8,
2909 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002910 workloadFactory,
2911 memoryManager,
2912 shape0,
2913 input0,
2914 shape1,
2915 input1,
2916 shape0,
2917 output,
2918 1.0f,
2919 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002920}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002921
kevmay012b4d88e2019-01-24 14:05:09 +00002922LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2924{
2925 const unsigned int width = 2;
2926 const unsigned int height = 2;
2927 const unsigned int channelCount = 2;
2928 const unsigned int batchSize = 2;
2929
2930 unsigned int shape[] = { batchSize, channelCount, height, width };
2931
2932 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2933 3, 3, 3, 3, 4, 4, 4, 4 });
2934
2935 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2936 5, 5, 5, 5, 4, 4, 4, 4 });
2937
kevmay012b4d88e2019-01-24 14:05:09 +00002938 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2939 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002940
kevmay012b4d88e2019-01-24 14:05:09 +00002941 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002942 workloadFactory,
2943 memoryManager,
2944 shape,
2945 input0,
2946 shape,
2947 input1,
2948 shape,
2949 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002950}
2951
kevmay012b4d88e2019-01-24 14:05:09 +00002952LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002953 armnn::IWorkloadFactory& workloadFactory,
2954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2955{
2956 unsigned int shape0[] = { 1, 2, 2, 2 };
2957 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2958
2959 unsigned int shape1[] = { 1, 1, 1, 1 };
2960 std::vector<float> input1({ 1 });
2961
kevmay012b4d88e2019-01-24 14:05:09 +00002962 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002963
kevmay012b4d88e2019-01-24 14:05:09 +00002964 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002965 workloadFactory,
2966 memoryManager,
2967 shape0,
2968 input0,
2969 shape1,
2970 input1,
2971 shape0,
2972 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002973}
2974
kevmay012b4d88e2019-01-24 14:05:09 +00002975LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002976 armnn::IWorkloadFactory& workloadFactory,
2977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2978{
2979 const unsigned int shape0[] = { 1, 2, 2, 3 };
2980 const unsigned int shape1[] = { 1, 1, 1, 3 };
2981
2982 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2983 7, 8, 9, 10, 11, 12 });
2984
2985 std::vector<float> input1({ 1, 3, 2});
2986
kevmay012b4d88e2019-01-24 14:05:09 +00002987 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2988 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002989
kevmay012b4d88e2019-01-24 14:05:09 +00002990 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002991 workloadFactory,
2992 memoryManager,
2993 shape0,
2994 input0,
2995 shape1,
2996 input1,
2997 shape0,
2998 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002999}
3000
3001LayerTestResult<uint8_t, 4> GreaterUint8Test(
3002 armnn::IWorkloadFactory& workloadFactory,
3003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3004{
3005 unsigned int shape[] = { 2, 2, 2, 2 };
3006
3007 // See dequantized values to the right.
3008 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3009 3, 3, 3, 3, 5, 5, 5, 5 });
3010
3011 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3012 2, 2, 2, 2, 5, 5, 5, 5 });
3013
3014 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3015 1, 1, 1, 1, 0, 0, 0, 0 });
3016
kevmay012b4d88e2019-01-24 14:05:09 +00003017 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3018 armnn::DataType::QuantisedAsymm8,
3019 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003020 workloadFactory,
3021 memoryManager,
3022 shape,
3023 input0,
3024 shape,
3025 input1,
3026 shape,
3027 output,
3028 1.0f,
3029 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003030}
3031
3032LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3033 armnn::IWorkloadFactory& workloadFactory,
3034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3035{
3036 const unsigned int shape0[] = { 1, 2, 2, 3 };
3037 const unsigned int shape1[] = { 1, 1, 1, 1 };
3038
3039 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3040 7, 8, 9, 10, 11, 12 });
3041
3042 std::vector<uint8_t> input1({ 1 });
3043
3044 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3045 1, 1, 1, 1, 1, 1 });
3046
kevmay012b4d88e2019-01-24 14:05:09 +00003047 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3048 armnn::DataType::QuantisedAsymm8,
3049 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003050 workloadFactory,
3051 memoryManager,
3052 shape0,
3053 input0,
3054 shape1,
3055 input1,
3056 shape0,
3057 output,
3058 1.0f,
3059 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003060}
3061
3062LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3063 armnn::IWorkloadFactory& workloadFactory,
3064 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3065{
3066 const unsigned int shape0[] = { 1, 2, 2, 3 };
3067 const unsigned int shape1[] = { 1, 1, 1, 3 };
3068
3069 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3070 7, 8, 9, 10, 11, 12 });
3071
3072 std::vector<uint8_t> input1({ 1, 1, 3});
3073
3074 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3075 1, 1, 1, 1, 1, 1 });
3076
kevmay012b4d88e2019-01-24 14:05:09 +00003077 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3078 armnn::DataType::QuantisedAsymm8,
3079 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003080 workloadFactory,
3081 memoryManager,
3082 shape0,
3083 input0,
3084 shape1,
3085 input1,
3086 shape0,
3087 output,
3088 1.0f,
3089 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003090}
3091
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003092LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3094{
3095 const unsigned int width = 2;
3096 const unsigned int height = 2;
3097 const unsigned int channelCount = 2;
3098 const unsigned int batchSize = 2;
3099
3100 unsigned int shape[] = { batchSize, channelCount, height, width };
3101
3102 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3103 3, 3, 3, 3, 4, 4, 4, 4 });
3104
3105 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3106 4, 4, 4, 4, 5, 5, 5, 5 });
3107
3108 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3109 4, 4, 4, 4, 5, 5, 5, 5 });
3110
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003111 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3112 workloadFactory,
3113 memoryManager,
3114 shape,
3115 input0,
3116 shape,
3117 input1,
3118 shape,
3119 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003120}
3121
3122LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3123 armnn::IWorkloadFactory& workloadFactory,
3124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3125{
3126 unsigned int shape0[] = { 1, 2, 2, 2 };
3127 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3128
3129 unsigned int shape1[] = { 1, 1, 1, 1 };
3130 std::vector<float> input1({ 2 });
3131
3132 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3133
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003134 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3135 workloadFactory,
3136 memoryManager,
3137 shape0,
3138 input0,
3139 shape1,
3140 input1,
3141 shape0,
3142 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003143}
3144
3145LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3146 armnn::IWorkloadFactory& workloadFactory,
3147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3148{
3149 const unsigned int shape0[] = { 1, 2, 2, 3 };
3150 const unsigned int shape1[] = { 1, 1, 1, 3 };
3151
3152 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3153 7, 8, 9, 10, 11, 12 });
3154
3155 std::vector<float> input1({ 1, 2, 3});
3156
3157 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003158 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003159
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003160 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3161 workloadFactory,
3162 memoryManager,
3163 shape0,
3164 input0,
3165 shape1,
3166 input1,
3167 shape0,
3168 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003169}
3170
3171LayerTestResult<uint8_t, 4> MaximumUint8Test(
3172 armnn::IWorkloadFactory& workloadFactory,
3173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3174{
3175 unsigned int shape[] = { 2, 2, 2, 2 };
3176
3177 // See dequantized values to the right.
3178 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3179 3, 3, 3, 3, 4, 4, 4, 4 });
3180
3181 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3182 4, 4, 4, 4, 5, 5, 5, 5 });
3183
3184 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3185 4, 4, 4, 4, 5, 5, 5, 5 });
3186
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003187 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3188 workloadFactory,
3189 memoryManager,
3190 shape,
3191 input0,
3192 shape,
3193 input1,
3194 shape,
3195 output,
3196 1.0f,
3197 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003198}
3199
3200LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3201 armnn::IWorkloadFactory& workloadFactory,
3202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3203{
3204 const unsigned int shape0[] = { 1, 2, 2, 3 };
3205 const unsigned int shape1[] = { 1, 1, 1, 1 };
3206
3207 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3208 7, 8, 9, 10, 11, 12 });
3209
3210 std::vector<uint8_t> input1({2});
3211
3212 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3213 7, 8, 9, 10, 11, 12 });
3214
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003215 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3216 workloadFactory,
3217 memoryManager,
3218 shape0,
3219 input0,
3220 shape1,
3221 input1,
3222 shape0,
3223 output,
3224 1.0f,
3225 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003226}
3227
3228LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3229 armnn::IWorkloadFactory& workloadFactory,
3230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3231{
3232 const unsigned int shape0[] = { 1, 2, 2, 3 };
3233 const unsigned int shape1[] = { 1, 1, 1, 3 };
3234
3235 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3236 7, 8, 9, 10, 11, 12 });
3237
3238 std::vector<uint8_t> input1({ 1, 10, 3});
3239
3240 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3241 7, 10, 9, 10, 11, 12 });
3242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003243 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3244 workloadFactory,
3245 memoryManager,
3246 shape0,
3247 input0,
3248 shape1,
3249 input1,
3250 shape0,
3251 output,
3252 1.0f,
3253 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003254}
3255
Sadik Armagan2999a022019-04-09 14:20:12 +01003256LayerTestResult<int16_t, 4> MaximumInt16Test(
3257 armnn::IWorkloadFactory& workloadFactory,
3258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3259{
3260 unsigned int shape[] = { 2, 2, 2, 2 };
3261
3262 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3263 3, 3, 3, 3, 4, 4, 4, 4 });
3264
3265 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3266 4, 4, 4, 4, 5, 5, 5, 5 });
3267
3268 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3269 4, 4, 4, 4, 5, 5, 5, 5 });
3270
3271 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3272 workloadFactory,
3273 memoryManager,
3274 shape,
3275 input0,
3276 shape,
3277 input1,
3278 shape,
3279 output,
3280 1.0f,
3281 0);
3282}
3283
3284LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3285 armnn::IWorkloadFactory& workloadFactory,
3286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3287{
3288 const unsigned int shape0[] = { 1, 2, 2, 3 };
3289 const unsigned int shape1[] = { 1, 1, 1, 1 };
3290
3291 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3292 7, 8, 9, 10, 11, 12 });
3293
3294 std::vector<int16_t> input1({2});
3295
3296 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3297 7, 8, 9, 10, 11, 12 });
3298
3299 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3300 workloadFactory,
3301 memoryManager,
3302 shape0,
3303 input0,
3304 shape1,
3305 input1,
3306 shape0,
3307 output,
3308 1.0f,
3309 0);
3310}
3311
3312LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3313 armnn::IWorkloadFactory& workloadFactory,
3314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3315{
3316 const unsigned int shape0[] = { 1, 2, 2, 3 };
3317 const unsigned int shape1[] = { 1, 1, 1, 3 };
3318
3319 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3320 7, 8, 9, 10, 11, 12 });
3321
3322 std::vector<int16_t> input1({ 1, 10, 3});
3323
3324 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3325 7, 10, 9, 10, 11, 12 });
3326
3327 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3328 workloadFactory,
3329 memoryManager,
3330 shape0,
3331 input0,
3332 shape1,
3333 input1,
3334 shape0,
3335 output,
3336 1.0f,
3337 0);
3338}
3339
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003340LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3341 armnn::IWorkloadFactory& workloadFactory,
3342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3343{
3344 unsigned int shape0[] = { 1, 2, 2, 2 };
3345 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3346
3347 unsigned int shape1[] = { 1, 1, 1, 1 };
3348 std::vector<float> input1({ 2 });
3349
3350 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3351
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003352 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3353 workloadFactory,
3354 memoryManager,
3355 shape0,
3356 input0,
3357 shape1,
3358 input1,
3359 shape0,
3360 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003361}
3362
3363
3364LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3365 armnn::IWorkloadFactory& workloadFactory,
3366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3367{
3368 unsigned int shape0[] = { 1, 2, 2, 2 };
3369 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3370
3371 unsigned int shape1[] = { 1, 1, 1, 1 };
3372 std::vector<float> input1({ 5 });
3373
3374 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3375
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003376 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3377 workloadFactory,
3378 memoryManager,
3379 shape0,
3380 input0,
3381 shape1,
3382 input1,
3383 shape0,
3384 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003385}
3386
3387LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3388 armnn::IWorkloadFactory & workloadFactory,
3389 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3390{
3391 const unsigned int shape0[] = { 1, 2, 2, 3 };
3392 const unsigned int shape1[] = { 1, 1, 1, 3 };
3393
3394 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3395 7, 1, 2, 3, 4, 5 });
3396
3397 std::vector<uint8_t> input1({ 1, 2, 3});
3398
3399 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3400 1, 1, 2, 1, 2, 3 });
3401
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003402 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3403 workloadFactory,
3404 memoryManager,
3405 shape0,
3406 input0,
3407 shape1,
3408 input1,
3409 shape0,
3410 output,
3411 1.0f,
3412 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003413}
3414
Sadik Armagan2999a022019-04-09 14:20:12 +01003415LayerTestResult<int16_t, 4> MinimumInt16Test(
3416 armnn::IWorkloadFactory& workloadFactory,
3417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3418{
3419 unsigned int shape[] = { 2, 2, 2, 2 };
3420
3421 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3422 3, 3, 3, 3, 4, 4, 4, 4 });
3423
3424 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3425 4, 4, 4, 4, 5, 5, 5, 5 });
3426
3427 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3428 3, 3, 3, 3, 4, 4, 4, 4 });
3429
3430 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3431 workloadFactory,
3432 memoryManager,
3433 shape,
3434 input0,
3435 shape,
3436 input1,
3437 shape,
3438 output,
3439 1.0f,
3440 0);
3441}
3442
3443LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3444 armnn::IWorkloadFactory& workloadFactory,
3445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3446{
3447 const unsigned int shape0[] = { 1, 2, 2, 3 };
3448 const unsigned int shape1[] = { 1, 1, 1, 1 };
3449
3450 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3451 7, 8, 9, 10, 11, 12 });
3452
3453 std::vector<int16_t> input1({2});
3454
3455 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3456 2, 2, 2, 2, 2, 2 });
3457
3458 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3459 workloadFactory,
3460 memoryManager,
3461 shape0,
3462 input0,
3463 shape1,
3464 input1,
3465 shape0,
3466 output,
3467 1.0f,
3468 0);
3469}
3470
3471LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3472 armnn::IWorkloadFactory& workloadFactory,
3473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3474{
3475 const unsigned int shape0[] = { 1, 2, 2, 3 };
3476 const unsigned int shape1[] = { 1, 1, 1, 3 };
3477
3478 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3479 7, 8, 9, 10, 11, 12 });
3480
3481 std::vector<int16_t> input1({ 1, 10, 3});
3482
3483 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3484 1, 8, 3, 1, 10, 3 });
3485
3486 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3487 workloadFactory,
3488 memoryManager,
3489 shape0,
3490 input0,
3491 shape1,
3492 input1,
3493 shape0,
3494 output,
3495 1.0f,
3496 0);
3497}
3498
namespace {
// Shared driver for the float Multiplication tests below.
//
// Builds a MultiplicationQueueDescriptor workload on the given factory, feeds
// it the two input tensors, executes it, and returns a LayerTestResult that
// pairs the actual output with the caller-supplied expected values.
//
// shape0/shape1/outShape are 4-D tensor dimensions; values0/values1/outValues
// hold the corresponding tensor data.
// NOTE(review): memoryManager is accepted for signature parity with the other
// test helpers but is not used here.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensor handles into the queue descriptor / workload info pair.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocation must happen before data can be copied into the handles.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Expected values are attached for the caller's comparison.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
3548
3549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003550LayerTestResult<float,4> MultiplicationTest(
3551 armnn::IWorkloadFactory& workloadFactory,
3552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003553{
3554 const unsigned int width = 2;
3555 const unsigned int height = 2;
3556 const unsigned int channelCount = 2;
3557 const unsigned int batchSize = 2;
3558
3559 unsigned int shape[] = { batchSize, channelCount, height, width };
3560
3561 std::vector<float> input0({
3562 1, 1, 1, 1, 2, 2, 2, 2,
3563 3, 3, 3, 3, 4, 4, 4, 4 });
3564
3565 std::vector<float> input1({
3566 2, 2, 2, 2, 3, 3, 3, 3,
3567 4, 4, 4, 4, 5, 5, 5, 5 });
3568
3569 std::vector<float> output({
3570 2, 2, 2, 2, 6, 6, 6, 6,
3571 12, 12, 12, 12, 20, 20, 20, 20 });
3572
3573 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003574 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003575 shape,
3576 input0,
3577 shape,
3578 input1,
3579 shape,
3580 output);
3581}
3582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003583LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3584 armnn::IWorkloadFactory& workloadFactory,
3585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003586{
3587 unsigned int shape0[] = { 1, 2, 2, 2 };
3588 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3589
3590 unsigned int shape1[] = { 1, 1, 1, 1 };
3591 std::vector<float> input1({ 2 });
3592
3593 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3594
3595 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003596 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003597 shape0,
3598 input0,
3599 shape1,
3600 input1,
3601 shape0,
3602 output);
3603}
3604
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003605LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3606 armnn::IWorkloadFactory& workloadFactory,
3607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003608{
3609 unsigned int shape0[] = { 1, 3, 3, 2 };
3610 std::vector<float> input0({
3611 1, 2, 3, 4, 5, 6,
3612 7, 8, 9, 10, 11, 12,
3613 13, 14, 15, 16, 17, 18});
3614
3615 unsigned int shape1[] = { 1, 1, 1, 2 };
3616 std::vector<float> input1({ 1, 2 });
3617
3618 std::vector<float> output({
3619 1, 4, 3, 8, 5, 12,
3620 7, 16, 9, 20, 11, 24,
3621 13, 28, 15, 32, 17, 36});
3622
3623 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003624 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003625 shape0,
3626 input0,
3627 shape1,
3628 input1,
3629 shape0,
3630 output);
3631}
telsoa014fcda012018-03-09 14:13:49 +00003632
// Cross-backend comparison test: runs the same random multiplication on
// workloadFactory (the backend under test) and refWorkloadFactory (the
// reference backend), returning the two outputs as output/outputExpected for
// the caller to compare.
// NOTE(review): memoryManager is unused here; both factories create their own
// tensor handles.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // One set of tensor handles per backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor starts as a copy, then its handles are swapped
    // for the reference backend's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // output = backend under test, outputExpected = reference backend.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3702
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003703LayerTestResult<float,4> CompareBatchNormTest(
3704 armnn::IWorkloadFactory& workloadFactory,
3705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3706 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003707{
3708 const unsigned int width = 2;
3709 const unsigned int height = 3;
3710 const unsigned int channels = 5;
3711 const unsigned int batchSize = 3;
3712
3713 armnn::TensorInfo inputTensorInfo;
3714 armnn::TensorInfo outputTensorInfo;
3715 armnn::TensorInfo tensorInfo;
3716
3717 constexpr unsigned int shape[] = {batchSize, channels, height, width};
3718 constexpr unsigned int tensorShape[] = {channels};
3719
3720 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3721 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3722 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
3723
3724 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
3725
3726 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
3727 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
3728 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
3729 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
3730
3731 LayerTestResult<float,4> ret(outputTensorInfo);
3732
3733 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3734 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3735
3736 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
3737 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
3738
3739 armnn::BatchNormalizationQueueDescriptor data;
3740 armnn::WorkloadInfo info;
3741 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
3742 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
3743 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
3744 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
3745
3746 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
3747 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
3748 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
3749 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
3750
3751 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
3752 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3753 data.m_Mean = &meanTensor;
3754 data.m_Variance = &varianceTensor;
3755 data.m_Beta = &betaTensor;
3756 data.m_Gamma = &gammaTensor;
3757 data.m_Parameters.m_Eps = 0.01f;
3758
3759 armnn::BatchNormalizationQueueDescriptor refData = data;
3760 armnn::WorkloadInfo refInfo = info;
3761 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
3762 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3763
3764 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
3765 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
3766
3767 inputHandle->Allocate();
3768 outputHandle->Allocate();
3769 inputHandleRef->Allocate();
3770 outputHandleRef->Allocate();
3771
3772 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3773 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
3774
Derek Lambertif30f7d32019-04-09 10:25:02 +01003775 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003776 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01003777 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003778 workloadRef->Execute();
3779
3780 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3781 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
3782
3783 return ret;
3784}
3785
surmeh013537c2c2018-05-18 16:31:43 +01003786template<typename T>
3787void PermuteTensorData(
3788 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003789 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003790 const armnn::PermutationVector& mappings,
3791 armnn::TensorInfo & inputTensorInfo,
3792 const T * inputData,
3793 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00003794{
surmeh013537c2c2018-05-18 16:31:43 +01003795 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
3796 if (inputData == nullptr)
3797 {
3798 // Nullptr is an error in the test. By returning without doing the concatenation
3799 // I expect the caller to fail the test. It still makes sense to report this as
3800 // an assert for Debug builds.
3801 return;
3802 }
telsoa014fcda012018-03-09 14:13:49 +00003803
surmeh013537c2c2018-05-18 16:31:43 +01003804 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
3805
3806 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3807 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3808
3809 armnn::PermuteQueueDescriptor queueDescriptor;
3810 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
3811 armnn::WorkloadInfo workloadInfo;
3812 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
3813 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3814
3815 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
3816
3817 inputHandle->Allocate();
3818 outputHandle->Allocate();
3819
3820 CopyDataToITensorHandle(inputHandle.get(), inputData);
3821
Derek Lambertif30f7d32019-04-09 10:25:02 +01003822 workload->PostAllocationConfigure();
surmeh013537c2c2018-05-18 16:31:43 +01003823 workload->Execute();
3824
3825 outputData.resize(outputTensorInfo.GetNumElements());
3826 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
3827 inputTensorInfo = outputTensorInfo;
3828}
3829
Jim Flynn825af452019-05-20 12:49:28 +01003830armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003831 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3832 unsigned int concatDim)
3833{
telsoa014fcda012018-03-09 14:13:49 +00003834 std::vector<armnn::TensorShape> shapes;
3835 shapes.reserve(inputTensorInfos.size());
3836 for (const armnn::TensorInfo& it: inputTensorInfos)
3837 {
3838 shapes.push_back(it.GetShape());
3839 }
surmeh013537c2c2018-05-18 16:31:43 +01003840
Jim Flynn825af452019-05-20 12:49:28 +01003841 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3842 shapes.end(),
3843 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003844}
3845
3846//
narpra015cdda352018-11-19 15:30:27 +00003847// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension
3848// In case of <4 dimensions we need to make sure that the concat dimensions are at least
3849// the 3rd slowest iterating one or the inner most dimension.
surmeh013537c2c2018-05-18 16:31:43 +01003850//
3851
3852bool NeedPermuteForConcat(
3853 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3854 unsigned int concatDim)
3855{
3856 // See note above. Additionally we expect the input shapes to have the
3857 // same number of dimensions.
3858 unsigned int nDimensions = 0;
3859
telsoa01c577f2c2018-08-31 09:22:23 +01003860 // Determine the number of dimensions as well as sanity check them
3861 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003862 for (auto && tensorInfo : inputTensorInfos)
3863 {
3864 if (!nDimensions)
3865 {
3866 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3867 }
3868 else
3869 {
3870 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3871 "Input shapes must have the same number of dimensions");
3872 }
3873 }
3874
narpra015cdda352018-11-19 15:30:27 +00003875 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003876}
3877
3878armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3879{
3880 unsigned int numDims = inputShape.GetNumDimensions();
3881 if (numDims >= 3)
3882 {
3883 // Nothing to do if the inputShape has at least 3 dimensions.
3884 return inputShape;
3885 }
3886
3887 std::vector<unsigned int> newDims(size_t(3), 1u);
3888 unsigned int expandedBy = 3 - numDims;
3889 for (unsigned int i=0; i<numDims; ++i)
3890 {
3891 newDims[expandedBy+i] = inputShape[i];
3892 }
3893 return armnn::TensorShape(3u, &newDims[0]);
3894}
3895
3896void Generate3dPermuteVectorForConcat(
3897 unsigned int numDimensions,
3898 unsigned int & concatDim,
3899 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3900{
3901 BOOST_ASSERT_MSG(numDimensions <= 3,
3902 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003903 unsigned int expandedBy = 3 - numDimensions;
3904 unsigned int expandedConcatAxis = concatDim + expandedBy;
3905
3906 if (expandedConcatAxis == 2)
3907 {
3908 concatDim = 0;
3909 armnn::PermutationVector forwardPermutation({1, 2, 0});
3910 armnn::PermutationVector reversePermutation({2, 0, 1});
3911 permutations = std::make_pair(forwardPermutation, reversePermutation);
3912 }
3913 else if (expandedConcatAxis == 1)
3914 {
3915 concatDim = 0;
3916 armnn::PermutationVector forwardPermutation({2, 0, 1});
3917 armnn::PermutationVector reversePermutation({1, 2, 0});
3918 permutations = std::make_pair(forwardPermutation, reversePermutation);
3919 }
3920 else
3921 {
3922 BOOST_ASSERT(expandedConcatAxis == 0);
3923 concatDim = 0;
3924 }
3925}
3926
3927//
3928// Permute the input tensors so we can do a supported concatenation.
3929// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3930// at the front. Finally this function tells what the output shape
3931// of the permuted concatenated tensor is going to be.
3932//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    // Permutes every input so the concatenation can happen along dimension 0.
    // On return: inputTensorInfos/inputData describe the permuted (3d) inputs,
    // inputDataStorage owns the permuted buffers, permuteVector holds the
    // reverse permutation for un-permuting the result, concatDim is updated,
    // and outputTensorInfo is set to the permuted output shape.
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input determines the rank and the permutation; subsequent
            // inputs only get validated against it.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation for PermuteOutputForConcat.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3d before permuting (lower-rank inputs get leading 1s).
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Point the caller's view at the permuted copy.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3995
3996
3997//
3998// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003999// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004000// output.
4001//
4002template <typename T>
4003void PermuteOutputForConcat(
4004 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004006 const armnn::TensorInfo & tensorInfo,
4007 const armnn::PermutationVector & permuteVector,
4008 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4009 T * data)
4010{
4011 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4012 if (data == nullptr)
4013 {
4014 // Nullptr is an error in the test. By returning without doing the permutation
4015 // I expect the caller to fail the test. It still makes sense to report this as
4016 // an assert for Debug builds.
4017 return;
4018 }
4019
4020 armnn::TensorInfo resultTensorInfo = tensorInfo;
4021 std::vector<T> inputData(tensorInfo.GetNumElements());
4022 std::vector<T> outputData;
4023
4024 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4025
4026 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004027 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004028 permuteVector,
4029 resultTensorInfo,
4030 &inputData[0],
4031 outputData);
4032
4033 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4034}
4035
// Runs a Concat workload over the given inputs and writes the result to
// 'output'. If the requested concatDim is not directly supported, the inputs
// are first permuted (and the output un-permuted) so concatenation happens
// along dimension 0. With useSubtensor=true the inputs are bound as
// sub-tensors of the output where the factory supports it.
template <typename T>
void Concatenate(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ConcatQueueDescriptor queueDescriptor;
    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        // Copy each view's origin out of the descriptor; the origins are needed
        // both by the workload and for creating sub-tensor handles below.
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }

        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        // Bind each input as a sub-tensor of the output where supported,
        // otherwise fall back to stand-alone input tensors.
        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
            std::unique_ptr<armnn::ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);

            inputHandles.emplace_back(std::move(inputHandle));
        }

    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        // Undo the input permutation on the result before handing it back.
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}
4172
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004173template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004174LayerTestResult<T, 1> Concatenation1dTestImpl(
4175 armnn::IWorkloadFactory& workloadFactory,
4176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4177 float qScale,
4178 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004179{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004180 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004181
4182 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4183 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4184 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4185
Jim Flynncbb66aa2019-05-15 13:03:54 +01004186 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004187
4188 LayerTestResult<T, 1> result(outputTensorInfo);
4189
4190 std::vector<T> output;
4191 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004192 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004193 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4194 { input0.data(), input1.data(), input2.data() },
4195 outputTensorInfo,
4196 output.data(),
4197 0,
4198 true);
telsoa014fcda012018-03-09 14:13:49 +00004199
4200 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4201 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4202 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4203 }));
4204
4205 return result;
4206}
4207
// Float32 variant of Concatenation1dTestImpl (no quantization: scale 0, offset 0).
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4214
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004215template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004216LayerTestResult<T, 2> Concatenation2dTestImpl(
4217 armnn::IWorkloadFactory& workloadFactory,
4218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004219 const armnn::TensorInfo& outputTensorInfo,
4220 unsigned int dimension,
4221 const float qScale,
4222 const int32_t qOffset)
4223{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004224 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004225
4226 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4227 // Batch 0
4228 1.0f, 2.0f, 3.0f,
4229
4230 // Batch 1
4231 10.0f, 11.0f, 12.0f,
4232 }));
4233
4234 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4235 // Batch 0
4236 4.0f, 5.0f, 6.0f,
4237
4238 // Batch 1
4239 13.0f, 14.0f, 15.0f,
4240 }));
4241
4242 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4243 // Batch 0
4244 7.0f, 8.0f, 9.0f,
4245
4246 // Batch 1
4247 16.0f, 17.0f, 18.0f,
4248 }));
4249
4250 LayerTestResult<T, 2> result(outputTensorInfo);
4251
4252 std::vector<T> output;
4253 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004254 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004255 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4256 { input0.data(), input1.data(), input2.data() },
4257 outputTensorInfo,
4258 output.data(),
4259 dimension,
4260 true);
telsoa014fcda012018-03-09 14:13:49 +00004261
4262 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4263 return result;
4264}
4265
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004266template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004267LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4268 armnn::IWorkloadFactory& workloadFactory,
4269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4270 float qScale,
4271 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004272{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004273 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004274
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004275 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4276 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4277
telsoa014fcda012018-03-09 14:13:49 +00004278 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4279 // Batch 0
4280 1.0f, 2.0f, 3.0f,
4281
4282 // Batch 1
4283 10.0f, 11.0f, 12.0f,
4284
4285 // Batch 2
4286 4.0f, 5.0f, 6.0f,
4287
4288 // Batch 3
4289 13.0f, 14.0f, 15.0f,
4290
4291 // Batch 4
4292 7.0f, 8.0f, 9.0f,
4293
4294 // Batch 5
4295 16.0f, 17.0f, 18.0f,
4296 }));
4297
4298 return result;
4299}
4300
// Float32 variant of Concatenation2dDim0TestImpl (no quantization: scale 0, offset 0).
LayerTestResult<float, 2> Concatenation2dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4307
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004308template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004309LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4310 armnn::IWorkloadFactory& workloadFactory,
4311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4312 float qScale,
4313 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004314{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004315 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004316
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004317 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4318 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4319
telsoa014fcda012018-03-09 14:13:49 +00004320 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4321 // Batch 0
4322 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4323
4324 // Batch 1
4325 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4326 }));
4327
4328 return result;
4329}
4330
// Float32 variant of Concatenation2dDim1TestImpl (no quantization: scale 0, offset 0).
LayerTestResult<float, 2> Concatenation2dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4337
// Concatenates three tensors with different batch counts (2 + 3 + 1 = 6)
// along dimension 0.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4408
// Float32 variant of Concatenation2dDim0DiffInputDimsTestImpl (no quantization).
LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
4416
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004417template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004418LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
4419 armnn::IWorkloadFactory& workloadFactory,
4420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4421 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004422 int32_t qOffset)
4423{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004424 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004425 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4426 // Batch 0
4427 1.0f, 2.0f, 3.0f,
4428
4429 // Batch 1
4430 10.0f, 11.0f, 12.0f,
4431 }));
4432
Jim Flynncbb66aa2019-05-15 13:03:54 +01004433 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004434 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4435 // Batch 0
4436 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
4437
4438 // Batch 1
4439 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
4440 }));
4441
Jim Flynncbb66aa2019-05-15 13:03:54 +01004442 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004443 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4444 // Batch 0
4445 9.0f,
4446
4447 // Batch 1
4448 18.0f
4449 }));
4450
Jim Flynncbb66aa2019-05-15 13:03:54 +01004451 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004452 LayerTestResult<T, 2> result(outputTensorInfo);
4453
4454 std::vector<T> output;
4455 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004456 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004457 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4458 { input0.data(), input1.data(), input2.data() },
4459 outputTensorInfo,
4460 output.data(),
4461 1,
4462 true);
telsoa014fcda012018-03-09 14:13:49 +00004463
4464 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4465 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4466 // Batch 0
4467 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4468
4469 // Batch 1
4470 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
4471 }));
4472
4473 return result;
4474}
4475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004476LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4477 armnn::IWorkloadFactory& workloadFactory,
4478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004479{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004480 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4481 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004482}
4483
// Shared driver for the 3D concatenation tests below. Builds three identical
// { 2, 3, 2 } inputs (values 1..36 spread across them) and concatenates them
// along 'dimension'. The caller provides the expected output shape via
// 'outputTensorInfo' and fills in result.outputExpected itself; this helper
// only populates result.output with what the workload produced.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Runs the concat workload; 'useSubtensor' selects the sub-tensor
    // optimisation path in backends that support it.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
4571
// Concatenation of three { 2, 3, 2 } tensors along dimension 0 (batch):
// output is { 6, 3, 2 } with the inputs stacked batch-wise in order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
4642
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004643LayerTestResult<float, 3> Concatenation3dDim0Test(
4644 armnn::IWorkloadFactory& workloadFactory,
4645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004647 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004648}
4649
// Concatenation of three { 2, 3, 2 } tensors along dimension 1 (channel):
// output is { 2, 9, 2 } with each batch holding the channels of all inputs.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
4720
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004721LayerTestResult<float, 3> Concatenation3dDim1Test(
4722 armnn::IWorkloadFactory& workloadFactory,
4723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004724{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004725 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004726}
4727
// Concatenation of three { 2, 3, 2 } tensors along dimension 2 (innermost):
// output is { 2, 3, 6 }, interleaving each input's pair of values per channel.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
4763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764LayerTestResult<float, 3> Concatenation3dDim2Test(
4765 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4767 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004769 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4770 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004771}
4772
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004773template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004774LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4775 armnn::IWorkloadFactory& workloadFactory,
4776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4777 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004778 int32_t qOffset)
4779{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004780 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004781 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4782 // Batch 0, Channel 0
4783 1.0f, 2.0f,
4784
4785 // Batch 0, Channel 1
4786 3.0f, 4.0f,
4787
4788 // Batch 0, Channel 2
4789 5.0f, 6.0f,
4790
4791 // Batch 1, Channel 0
4792 19.0f, 20.0f,
4793
4794 // Batch 1, Channel 1
4795 21.0f, 22.0f,
4796
4797 // Batch 1, Channel 2
4798 23.0f, 24.0f
4799 }));
4800
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004801 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004802 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4803 // Batch 0, Channel 0
4804 7.0f, 8.0f,
4805
4806 // Batch 0, Channel 1
4807 9.0f, 10.0f,
4808
4809 // Batch 0, Channel 2
4810 11.0f, 12.0f,
4811 }));
4812
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004813 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004814 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4815 // Batch 0, Channel 0
4816 25.0f, 26.0f,
4817
4818 // Batch 0, Channel 1
4819 27.0f, 28.0f,
4820
4821 // Batch 0, Channel 2
4822 29.0f, 30.0f,
4823
4824 // Batch 1, Channel 0
4825 13.0f, 14.0f,
4826
4827 // Batch 1, Channel 1
4828 15.0f, 16.0f,
4829
4830 // Batch 1, Channel 2
4831 17.0f, 18.0f,
4832
4833 // Batch 2, Channel 0
4834 31.0f, 32.0f,
4835
4836 // Batch 2, Channel 1
4837 33.0f, 34.0f,
4838
4839 // Batch 2, Channel 2
4840 35.0f, 36.0f
4841 }));
4842
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004843 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004844 LayerTestResult<T, 3> result(outputTensorInfo);
4845
4846 std::vector<T> output;
4847 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004848 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004849 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4850 { input0.data(), input1.data(), input2.data() },
4851 outputTensorInfo,
4852 output.data(),
4853 0,
4854 true);
telsoa014fcda012018-03-09 14:13:49 +00004855
4856 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4857 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4858 // Batch 0, Channel 0
4859 1.0f, 2.0f,
4860
4861 // Batch 0, Channel 1
4862 3.0f, 4.0f,
4863
4864 // Batch 0, Channel 2
4865 5.0f, 6.0f,
4866
4867 // Batch 1, Channel 0
4868 19.0f, 20.0f,
4869
4870 // Batch 1, Channel 1
4871 21.0f, 22.0f,
4872
4873 // Batch 1, Channel 2
4874 23.0f, 24.0f,
4875
4876 // Batch 2, Channel 0
4877 7.0f, 8.0f,
4878
4879 // Batch 2, Channel 1
4880 9.0f, 10.0f,
4881
4882 // Batch 2, Channel 2
4883 11.0f, 12.0f,
4884
4885 // Batch 3, Channel 0
4886 25.0f, 26.0f,
4887
4888 // Batch 3, Channel 1
4889 27.0f, 28.0f,
4890
4891 // Batch 3, Channel 2
4892 29.0f, 30.0f,
4893
4894 // Batch 4, Channel 0
4895 13.0f, 14.0f,
4896
4897 // Batch 4, Channel 1
4898 15.0f, 16.0f,
4899
4900 // Batch 4, Channel 2
4901 17.0f, 18.0f,
4902
4903 // Batch 5, Channel 0
4904 31.0f, 32.0f,
4905
4906 // Batch 5, Channel 1
4907 33.0f, 34.0f,
4908
4909 // Batch 5, Channel 2
4910 35.0f, 36.0f
4911 }));
4912
4913 return result;
4914}
4915
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004916LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4917 armnn::IWorkloadFactory& workloadFactory,
4918 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004919{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004920 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4921 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004922}
4923
// Concatenation along dimension 1 (channel) of three inputs with DIFFERENT
// channel counts: { 2, 3, 2 }, { 2, 4, 2 } and { 2, 1, 2 } -> output { 2, 8, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along axis 1 (channel) using the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected layout per batch: input0's channels, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5054
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005055LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5056 armnn::IWorkloadFactory& workloadFactory,
5057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005058{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005059 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5060 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005061}
5062
// Concatenation along dimension 2 (innermost) of three inputs with DIFFERENT
// inner extents: { 2, 3, 2 }, { 2, 3, 1 } and { 2, 3, 3 } -> output { 2, 3, 6 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along axis 2; 'useSubtensor' selects the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected layout per channel: input0's 2 values, input1's 1, input2's 3.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5170
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005171LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5172 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5174 bool useSubtensor)
5175{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005176 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5177 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005178}
5179
// Shared driver for the 4D concatenation tests below. Builds three identical
// { 1, 3, 2, 2 } inputs and concatenates them along 'dimension'. The caller
// provides the expected output shape and fills in result.outputExpected.
// Note: input value runs deliberately overlap (input1 starts at 11, input2 at
// 21) so neighbouring inputs share boundary values in the expected data.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    // Runs the concat workload; 'useSubtensor' selects the sub-tensor path.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5236
// Concatenation of three { 1, 3, 2, 2 } tensors along dimension 0 (batch):
// output is { 3, 3, 2, 2 } with the inputs stacked in order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5273
5274LayerTestResult<float, 4> Concatenation4dDim0Test(
5275 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005277{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005278 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005279}
5280
// Concatenation of three { 1, 3, 2, 2 } tensors along dimension 1:
// output is { 1, 9, 2, 2 } with the inputs' channel blocks in order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5318
5319LayerTestResult<float, 4> Concatenation4dDim1Test(
5320 armnn::IWorkloadFactory& workloadFactory,
5321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5322{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005323 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005324}
5325
// Concatenation of three { 1, 3, 2, 2 } tensors along dimension 2:
// output is { 1, 3, 6, 2 }, interleaving each input's rows per channel.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5363
5364LayerTestResult<float, 4> Concatenation4dDim2Test(
5365 armnn::IWorkloadFactory& workloadFactory,
5366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5367{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005368 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005369}
5370
// Concatenation of three { 1, 3, 2, 2 } tensors along dimension 3 (innermost):
// output is { 1, 3, 2, 6 }, interleaving each input's pair of values per row.
// NOTE(review): parameter order here (qScale, qOffset, useSubtensor) differs
// from the 3D variants (useSubtensor first) — kept as-is for caller
// compatibility.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5409
5410LayerTestResult<float, 4> Concatenation4dDim3Test(
5411 armnn::IWorkloadFactory& workloadFactory,
5412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5413 bool useSubtensor)
5414{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005415 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5416 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005417}
5418
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005419template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005420LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
5421 armnn::IWorkloadFactory& workloadFactory,
5422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5423 float qScale,
5424 int32_t qOffset)
5425{
5426 unsigned int dimension = 0;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005427 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005428
5429 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5430 1.0f, 2.0f,
5431 3.0f, 4.0f,
5432 5.0f, 6.0f,
5433 7.0f, 8.0f,
5434 9.0f, 10.0f,
5435 11.0f, 12.0f
5436 }));
5437
Jim Flynncbb66aa2019-05-15 13:03:54 +01005438 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005439
5440 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5441 11.0f, 12.0f,
5442 13.0f, 14.0f,
5443 15.0f, 16.0f,
5444 17.0f, 18.0f,
5445 19.0f, 20.0f,
5446 21.0f, 22.0f,
5447
5448 21.0f, 22.0f,
5449 23.0f, 24.0f,
5450 25.0f, 26.0f,
5451 27.0f, 28.0f,
5452 29.0f, 30.0f,
5453 31.0f, 32.0f
5454
5455 }));
5456
Jim Flynncbb66aa2019-05-15 13:03:54 +01005457 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005458
5459 LayerTestResult<T, 4> result(outputTensorInfo);
5460
5461 std::vector<T> output;
5462 output.resize(outputTensorInfo.GetNumElements());
5463 Concatenate<T>(workloadFactory,
5464 memoryManager,
5465 {inputTensorInfo0, inputTensorInfo1},
5466 {input0.data(), input1.data()},
5467 outputTensorInfo,
5468 output.data(),
5469 dimension,
5470 true);
5471
5472 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5473 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5474 1.0f, 2.0f,
5475 3.0f, 4.0f,
5476 5.0f, 6.0f,
5477 7.0f, 8.0f,
5478 9.0f, 10.0f,
5479 11.0f, 12.0f,
5480
5481 11.0f, 12.0f,
5482 13.0f, 14.0f,
5483 15.0f, 16.0f,
5484 17.0f, 18.0f,
5485 19.0f, 20.0f,
5486 21.0f, 22.0f,
5487
5488 21.0f, 22.0f,
5489 23.0f, 24.0f,
5490 25.0f, 26.0f,
5491 27.0f, 28.0f,
5492 29.0f, 30.0f,
5493 31.0f, 32.0f
5494 }));
5495
5496 return result;
5497}
5498
5499LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5500 armnn::IWorkloadFactory& workloadFactory,
5501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5502{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005503 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5504 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005505}
5506
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005507template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005508LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
5509 armnn::IWorkloadFactory& workloadFactory,
5510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5511 float qScale,
5512 int32_t qOffset)
5513{
5514 unsigned int dimension = 1;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005515 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005516
5517 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5518 1.0f, 2.0f,
5519 3.0f, 4.0f,
5520 5.0f, 6.0f,
5521 7.0f, 8.0f,
5522 9.0f, 10.0f,
5523 11.0f, 12.0f
5524 }));
5525
Jim Flynncbb66aa2019-05-15 13:03:54 +01005526 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005527
5528 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5529 11.0f, 12.0f,
5530 13.0f, 14.0f,
5531 15.0f, 16.0f,
5532 17.0f, 18.0f,
5533
5534 }));
5535
Jim Flynncbb66aa2019-05-15 13:03:54 +01005536 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005537
5538 LayerTestResult<T, 4> result(outputTensorInfo);
5539
5540 std::vector<T> output;
5541 output.resize(outputTensorInfo.GetNumElements());
5542 Concatenate<T>(workloadFactory,
5543 memoryManager,
5544 {inputTensorInfo0, inputTensorInfo1},
5545 {input0.data(), input1.data()},
5546 outputTensorInfo,
5547 output.data(),
5548 dimension,
5549 true);
5550
5551 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5552 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5553 1.0f, 2.0f,
5554 3.0f, 4.0f,
5555 5.0f, 6.0f,
5556 7.0f, 8.0f,
5557 9.0f, 10.0f,
5558 11.0f, 12.0f,
5559 11.0f, 12.0f,
5560 13.0f, 14.0f,
5561 15.0f, 16.0f,
5562 17.0f, 18.0f
5563 }));
5564
5565 return result;
5566}
5567
5568LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5569 armnn::IWorkloadFactory& workloadFactory,
5570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5571{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005572 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5573 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005574}
5575
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005576template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005577LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
5578 armnn::IWorkloadFactory& workloadFactory,
5579 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5580 float qScale,
5581 int32_t qOffset)
5582{
5583 unsigned int dimension = 2;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005584 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005585
5586 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5587 1.0f, 2.0f,
5588 3.0f, 4.0f,
5589 5.0f, 6.0f,
5590 7.0f, 8.0f,
5591 9.0f, 10.0f,
5592 11.0f, 12.0f
5593 }));
5594
Jim Flynncbb66aa2019-05-15 13:03:54 +01005595 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005596
5597 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5598 11.0f, 12.0f,
5599 13.0f, 14.0f,
5600 15.0f, 16.0f,
5601 17.0f, 18.0f,
5602 19.0f, 20.0f,
5603 21.0f, 22.0f,
5604 23.0f, 24.0f,
5605 25.0f, 26.0f,
5606 27.0f, 28.0f
5607 }));
5608
Jim Flynncbb66aa2019-05-15 13:03:54 +01005609 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005610
5611 LayerTestResult<T, 4> result(outputTensorInfo);
5612
5613 std::vector<T> output;
5614 output.resize(outputTensorInfo.GetNumElements());
5615 Concatenate<T>(workloadFactory,
5616 memoryManager,
5617 {inputTensorInfo0, inputTensorInfo1},
5618 {input0.data(), input1.data()},
5619 outputTensorInfo,
5620 output.data(),
5621 dimension,
5622 true);
5623
5624 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5625 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5626 1.0f, 2.0f,
5627 3.0f, 4.0f,
5628 11.0f, 12.0f,
5629 13.0f, 14.0f,
5630 15.0f, 16.0f,
5631
5632 5.0f, 6.0f,
5633 7.0f, 8.0f,
5634 17.0f, 18.0f,
5635 19.0f, 20.0f,
5636 21.0f, 22.0f,
5637
5638 9.0f, 10.0f,
5639 11.0f, 12.0f,
5640 23.0f, 24.0f,
5641 25.0f, 26.0f,
5642 27.0f, 28.0f
5643 }));
5644
5645 return result;
5646}
5647
5648LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5649 armnn::IWorkloadFactory& workloadFactory,
5650 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5651{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005652 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5653 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005654}
5655
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005656template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005657LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
5658 armnn::IWorkloadFactory& workloadFactory,
5659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5660 float qScale,
5661 int32_t qOffset,
5662 bool useSubtensor)
5663{
5664 unsigned int dimension = 3;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005665 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005666
5667 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5668 1.0f, 2.0f,
5669 3.0f, 4.0f,
5670 5.0f, 6.0f,
5671 7.0f, 8.0f,
5672 9.0f, 10.0f,
5673 11.0f, 12.0f
5674 }));
5675
Jim Flynncbb66aa2019-05-15 13:03:54 +01005676 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005677
5678 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5679 11.0f, 12.0f, 13.0f,
5680 14.0f, 15.0f, 16.0f,
5681
5682 17.0f, 18.0f, 19.0f,
5683 20.0f, 21.0f, 22.0f,
5684
5685 23.0f, 24.0f, 25.0f,
5686 26.0f, 27.0f, 28.0f
5687 }));
5688
Jim Flynncbb66aa2019-05-15 13:03:54 +01005689 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005690
5691 LayerTestResult<T, 4> result(outputTensorInfo);
5692
5693 std::vector<T> output;
5694 output.resize(outputTensorInfo.GetNumElements());
5695 Concatenate<T>(workloadFactory,
5696 memoryManager,
5697 {inputTensorInfo0, inputTensorInfo1},
5698 {input0.data(), input1.data()},
5699 outputTensorInfo,
5700 output.data(),
5701 dimension,
5702 useSubtensor);
5703
5704 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5705 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5706 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
5707 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
5708 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
5709 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
5710 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
5711 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
5712 }));
5713
5714 return result;
5715}
5716
5717LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5718 armnn::IWorkloadFactory& workloadFactory,
5719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5720 bool useSubtensor)
5721{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005722 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5723 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005724}
5725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005726LayerTestResult<float, 2> FakeQuantizationTest(
5727 armnn::IWorkloadFactory& workloadFactory,
5728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005729{
5730 constexpr unsigned int width = 2;
5731 constexpr unsigned int height = 3;
5732
5733 const armnn::TensorInfo tensorInfo({height, width },
5734 armnn::DataType::Float32);
5735 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5736 -10.0f, -5.0f,
5737 0.0f, 5.0f,
5738 10.0f, 10.0f
5739 }));
5740
5741 LayerTestResult<float, 2> ret(tensorInfo);
5742
5743 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5744
5745 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5746
5747 armnn::FakeQuantizationQueueDescriptor data;
5748 armnn::WorkloadInfo info;
5749
5750 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5751 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5752 float min = -10.f;
5753 float max = 10.f;
5754
5755 data.m_Parameters.m_Min = min;
5756 data.m_Parameters.m_Max = max;
5757
5758 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5759 armnn::FakeQuantizationQueueDescriptor refData = data;
5760 armnn::WorkloadInfo refInfo = info;
5761 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5762
5763 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5764
5765 inputHandle->Allocate();
5766 outputHandle->Allocate();
5767
5768 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5769
Derek Lambertif30f7d32019-04-09 10:25:02 +01005770 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005771 workload->Execute();
5772
5773 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5774
5775 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5776 0.0f, 63.0f,
5777 128.0f, 191.0f,
5778 255.0f, 255.0f
5779 }));
5780 return ret;
5781}
5782
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005783namespace
5784{
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01005785template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
5786LayerTestResult<T, 4> L2NormalizationTestImpl(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005787 armnn::IWorkloadFactory& workloadFactory,
5788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5789 const armnn::TensorShape& inputOutputTensorShape,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01005790 float scale,
5791 int32_t offset,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005792 const std::vector<float>& inputValues,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01005793 float outScale,
5794 int32_t outOffset,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005795 const std::vector<float>& expectedOutputValues,
Ferran Balaguere52211e2019-06-17 12:23:52 +01005796 const armnn::DataLayout layout,
5797 float epsilon = 1e-12f)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005798{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01005799 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
5800 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005801
jimfly013aab7c32018-11-12 13:32:08 +00005802 // at this point if we require it permute the input data
5803 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5804 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005805 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005806 {
5807 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005808 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005809 inputData = tmp;
5810 }
5811
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01005812 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
5813 inputTensorInfo.GetQuantizationScale(),
5814 inputTensorInfo.GetQuantizationOffset(),
5815 inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005816
jimfly013aab7c32018-11-12 13:32:08 +00005817 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005818 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005819 {
5820 std::vector<float> tmp(expectedOutputData.size());
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01005821 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
5822 sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005823 expectedOutputData = tmp;
5824 }
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01005825
5826 LayerTestResult<T, 4> result(outputTensorInfo);
5827 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
5828 outputTensorInfo.GetQuantizationScale(),
5829 outputTensorInfo.GetQuantizationOffset(),
5830 expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005831
5832 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5833 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5834
5835 armnn::L2NormalizationQueueDescriptor descriptor;
Ferran Balaguere52211e2019-06-17 12:23:52 +01005836 descriptor.m_Parameters.m_Eps = epsilon;
Matthew Bentham8800c002018-11-19 13:19:28 +00005837 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005838 armnn::WorkloadInfo info;
5839
5840 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5841 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5842
5843 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5844
5845 inputHandle->Allocate();
5846 outputHandle->Allocate();
5847
5848 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5849
Derek Lambertif30f7d32019-04-09 10:25:02 +01005850 workload->PostAllocationConfigure();
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005851 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005852
5853 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5854
5855 return result;
5856}
5857
5858float CalcInvL2Norm(std::initializer_list<float> elements)
5859{
5860 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5861 [](float acc, float element) { return acc + element * element; });
5862 return 1.0f / sqrtf(reduction);
5863}
5864
5865} // anonymous namespace
5866
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005867template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005868LayerTestResult<T, 2> Pad2dTestCommon(
5869 armnn::IWorkloadFactory& workloadFactory,
5870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5871 float qScale,
5872 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005873{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005874 const armnn::TensorShape inputShape{ 3, 3 };
5875 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005876
Derek Lambertif30f7d32019-04-09 10:25:02 +01005877 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5878 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005879
Derek Lambertif30f7d32019-04-09 10:25:02 +01005880 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005881 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005882 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005883 // Height (3) x Width (3)
5884 4, 8, 6,
5885 7, 4, 4,
5886 3, 2, 4
5887 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005888
Derek Lambertif30f7d32019-04-09 10:25:02 +01005889 std::vector<T> expectedOutputValues(
5890 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005891 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005892 0, 0, 0, 0, 0, 0, 0,
5893 0, 0, 0, 0, 0, 0, 0,
5894 0, 0, 4, 8, 6, 0, 0,
5895 0, 0, 7, 4, 4, 0, 0,
5896 0, 0, 3, 2, 4, 0, 0,
5897 0, 0, 0, 0, 0, 0, 0,
5898 0, 0, 0, 0, 0, 0, 0
5899 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005900
Derek Lambertif30f7d32019-04-09 10:25:02 +01005901 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005902
Derek Lambertif30f7d32019-04-09 10:25:02 +01005903 LayerTestResult<T, 2> result(outputTensorInfo);
5904 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005905
Derek Lambertif30f7d32019-04-09 10:25:02 +01005906 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5907 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005908
Derek Lambertif30f7d32019-04-09 10:25:02 +01005909 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005910
Derek Lambertif30f7d32019-04-09 10:25:02 +01005911 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5912 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5913 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005914
Derek Lambertif30f7d32019-04-09 10:25:02 +01005915 descriptor.m_Parameters.m_PadList = PadList;
5916 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005917
Derek Lambertif30f7d32019-04-09 10:25:02 +01005918 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5919 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005920
Derek Lambertif30f7d32019-04-09 10:25:02 +01005921 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005922
Derek Lambertif30f7d32019-04-09 10:25:02 +01005923 inputHandle->Allocate();
5924 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005925
Derek Lambertif30f7d32019-04-09 10:25:02 +01005926 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005927
Derek Lambertif30f7d32019-04-09 10:25:02 +01005928 workload->PostAllocationConfigure();
5929 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005930
Derek Lambertif30f7d32019-04-09 10:25:02 +01005931 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005932
Derek Lambertif30f7d32019-04-09 10:25:02 +01005933 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005934}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005935
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005936template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005937LayerTestResult<T, 3> Pad3dTestCommon(
5938 armnn::IWorkloadFactory& workloadFactory,
5939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5940 float qScale,
5941 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005942{
5943 const armnn::TensorShape inputShape{ 2, 2, 2 };
5944 const armnn::TensorShape outputShape{ 3, 5, 6 };
5945
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005946 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5947 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005948
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005949 std::vector<T> inputValues(
5950 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005951 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005952 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005953 0, 4,
5954 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005955
5956 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005957 6, 1,
5958 5, 2
5959 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005960
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005961 std::vector<T> expectedOutputValues(
5962 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005963 {
5964
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005965 0, 0, 0, 0, 0, 0,
5966 0, 0, 0, 0, 0, 0,
5967 0, 0, 0, 4, 0, 0,
5968 0, 0, 2, 5, 0, 0,
5969 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005970
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005971 0, 0, 0, 0, 0, 0,
5972 0, 0, 0, 0, 0, 0,
5973 0, 0, 6, 1, 0, 0,
5974 0, 0, 5, 2, 0, 0,
5975 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005976
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005977 0, 0, 0, 0, 0, 0,
5978 0, 0, 0, 0, 0, 0,
5979 0, 0, 0, 0, 0, 0,
5980 0, 0, 0, 0, 0, 0,
5981 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005982
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005983 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005984
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005985 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005986
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005987 LayerTestResult<T, 3> result(outputTensorInfo);
5988 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005989
5990 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5991 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5992
5993 armnn::PadQueueDescriptor descriptor;
5994
5995 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5996 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5997 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5998 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5999
6000 descriptor.m_Parameters.m_PadList = PadList;
6001 armnn::WorkloadInfo info;
6002
6003 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6004 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6005
6006 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6007
6008 inputHandle->Allocate();
6009 outputHandle->Allocate();
6010
6011 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6012
Derek Lambertif30f7d32019-04-09 10:25:02 +01006013 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006014 workload->Execute();
6015
6016 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6017
6018 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006019}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006020
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006021template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006022LayerTestResult<T, 4> Pad4dTestCommon(
6023 armnn::IWorkloadFactory& workloadFactory,
6024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6025 float qScale,
6026 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006027{
6028 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6029 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6030
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006031 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
6032 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006033
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006034 std::vector<T> inputValues(
6035 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006036 {
6037 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006038 0, 1,
6039 2, 3,
6040 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006041
6042 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006043 6, 7,
6044 8, 9,
6045 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006046
6047 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006048 12, 13,
6049 14, 15,
6050 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006051
6052 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006053 18, 19,
6054 20, 21,
6055 22, 23
6056 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006057
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006058 std::vector<T> expectedOutputValues(
6059 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006060 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006061 0, 0, 0, 0,
6062 0, 0, 0, 0,
6063 0, 0, 0, 0,
6064 0, 0, 0, 0,
6065 0, 0, 0, 0,
6066 0, 0, 0, 0,
6067 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006068
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006069 0, 0, 0, 0,
6070 0, 0, 0, 0,
6071 0, 0, 0, 0,
6072 0, 0, 0, 0,
6073 0, 0, 0, 0,
6074 0, 0, 0, 0,
6075 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006076
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006077 0, 0, 0, 0,
6078 0, 0, 0, 0,
6079 0, 0, 0, 0,
6080 0, 0, 0, 0,
6081 0, 0, 0, 0,
6082 0, 0, 0, 0,
6083 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006084
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006085 0, 0, 0, 0,
6086 0, 0, 0, 0,
6087 0, 0, 0, 0,
6088 0, 0, 0, 0,
6089 0, 0, 0, 0,
6090 0, 0, 0, 0,
6091 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006092
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006093 0, 0, 0, 0,
6094 0, 0, 0, 0,
6095 0, 0, 0, 0,
6096 0, 0, 0, 0,
6097 0, 0, 0, 0,
6098 0, 0, 0, 0,
6099 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006100
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006101 0, 0, 0, 0,
6102 0, 0, 0, 0,
6103 0, 0, 0, 0,
6104 0, 0, 0, 0,
6105 0, 0, 0, 0,
6106 0, 0, 0, 0,
6107 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006108
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006109 0, 0, 0, 0,
6110 0, 0, 0, 0,
6111 0, 0, 0, 0,
6112 0, 0, 0, 0,
6113 0, 0, 0, 0,
6114 0, 0, 0, 0,
6115 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006116
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006117 0, 0, 0, 0,
6118 0, 0, 0, 0,
6119 0, 0, 0, 0,
6120 0, 0, 1, 0,
6121 0, 2, 3, 0,
6122 0, 4, 5, 0,
6123 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006124
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006125 0, 0, 0, 0,
6126 0, 0, 0, 0,
6127 0, 0, 0, 0,
6128 0, 6, 7, 0,
6129 0, 8, 9, 0,
6130 0, 10, 11, 0,
6131 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006132
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006133 0, 0, 0, 0,
6134 0, 0, 0, 0,
6135 0, 0, 0, 0,
6136 0, 0, 0, 0,
6137 0, 0, 0, 0,
6138 0, 0, 0, 0,
6139 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006140
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006141 0, 0, 0, 0,
6142 0, 0, 0, 0,
6143 0, 0, 0, 0,
6144 0, 0, 0, 0,
6145 0, 0, 0, 0,
6146 0, 0, 0, 0,
6147 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006148
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006149 0, 0, 0, 0,
6150 0, 0, 0, 0,
6151 0, 0, 0, 0,
6152 0, 0, 0, 0,
6153 0, 0, 0, 0,
6154 0, 0, 0, 0,
6155 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006156
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006157 0, 0, 0, 0,
6158 0, 0, 0, 0,
6159 0, 0, 0, 0,
6160 0, 12, 13, 0,
6161 0, 14, 15, 0,
6162 0, 16, 17, 0,
6163 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006164
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006165 0, 0, 0, 0,
6166 0, 0, 0, 0,
6167 0, 0, 0, 0,
6168 0, 18, 19, 0,
6169 0, 20, 21, 0,
6170 0, 22, 23, 0,
6171 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006172
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006173 0, 0, 0, 0,
6174 0, 0, 0, 0,
6175 0, 0, 0, 0,
6176 0, 0, 0, 0,
6177 0, 0, 0, 0,
6178 0, 0, 0, 0,
6179 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006180
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006181 0, 0, 0, 0,
6182 0, 0, 0, 0,
6183 0, 0, 0, 0,
6184 0, 0, 0, 0,
6185 0, 0, 0, 0,
6186 0, 0, 0, 0,
6187 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006188
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006189 0, 0, 0, 0,
6190 0, 0, 0, 0,
6191 0, 0, 0, 0,
6192 0, 0, 0, 0,
6193 0, 0, 0, 0,
6194 0, 0, 0, 0,
6195 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006196
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006197 0, 0, 0, 0,
6198 0, 0, 0, 0,
6199 0, 0, 0, 0,
6200 0, 0, 0, 0,
6201 0, 0, 0, 0,
6202 0, 0, 0, 0,
6203 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006204
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006205 0, 0, 0, 0,
6206 0, 0, 0, 0,
6207 0, 0, 0, 0,
6208 0, 0, 0, 0,
6209 0, 0, 0, 0,
6210 0, 0, 0, 0,
6211 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006212
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006213 0, 0, 0, 0,
6214 0, 0, 0, 0,
6215 0, 0, 0, 0,
6216 0, 0, 0, 0,
6217 0, 0, 0, 0,
6218 0, 0, 0, 0,
6219 0, 0, 0, 0
6220 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006221
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006222 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006223
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006224 LayerTestResult<T, 4> result(outputTensorInfo);
6225 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006226
6227 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6228 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6229
6230 armnn::PadQueueDescriptor descriptor;
6231
6232 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6233 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6234 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6235 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6236 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6237
6238 descriptor.m_Parameters.m_PadList = PadList;
6239 armnn::WorkloadInfo info;
6240
6241 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6242 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6243
6244 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6245
6246 inputHandle->Allocate();
6247 outputHandle->Allocate();
6248
6249 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6250
Derek Lambertif30f7d32019-04-09 10:25:02 +01006251 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006252 workload->Execute();
6253
6254 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6255
6256 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006257}
6258
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006259LayerTestResult<uint8_t, 2> PadUint82dTest(
6260 armnn::IWorkloadFactory& workloadFactory,
6261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006262{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006263 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006264}
6265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006266LayerTestResult<uint8_t, 3> PadUint83dTest(
6267 armnn::IWorkloadFactory& workloadFactory,
6268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006269{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006270 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006271}
6272
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006273LayerTestResult<uint8_t, 4> PadUint84dTest(
6274 armnn::IWorkloadFactory& workloadFactory,
6275 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006276{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006277 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006278}
6279
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006280LayerTestResult<float, 2> PadFloat322dTest(
6281 armnn::IWorkloadFactory& workloadFactory,
6282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006283{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006284 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006285}
6286
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006287LayerTestResult<float, 3> PadFloat323dTest(
6288 armnn::IWorkloadFactory& workloadFactory,
6289 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006290{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006291 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006292}
6293
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006294LayerTestResult<float, 4> PadFloat324dTest(
6295 armnn::IWorkloadFactory& workloadFactory,
6296 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006297{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006298 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006299}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006300
// Exercises the epsilon handling of L2 normalisation: all inputs are so small
// that their squared sum falls below 'epsilon', so the expected normalisation
// factor is 1/sqrt(epsilon) rather than 1/sqrt(sum-of-squares).
// 'scale'/'offset' quantise the input, 'outScale'/'outOffset' the output;
// both pairs are ignored for float data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout,
    float epsilon)
{
    // Width: 1
    // Height: 1
    // Channels: 3
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 3;
    unsigned int height = 1;
    unsigned int width = 1;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);

    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 = 1.4e-15 < 1e-12 (default epsilon)
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        0.00000002f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        0.00000003f,
    };

    // Because the squared sum is below epsilon, the reference factor is 1/sqrt(epsilon).
    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        0.00000001f * approxInvL2Norm,
        0.00000002f * approxInvL2Norm,
        0.00000003f * approxInvL2Norm,
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout,
                                              epsilon);
}
6350
6351
// L2-normalises a 1x10x1x1 tensor across the channel dimension. With a single
// spatial element per channel, every value is divided by the L2 norm of the
// whole channel vector: 1/sqrt(1^2 + ... + 10^2) = 1/sqrt(385) ~= 0.050964719.
// 'scale'/'offset' quantise the input, 'outScale'/'outOffset' the output;
// both pairs are ignored for float data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization1dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 1
    // Height: 1
    // Channels: 10
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 10;
    unsigned int height = 1;
    unsigned int width = 1;


    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f,

        // Batch 0, Channel 1, Height (1) x Width (1)
        2.0f,

        // Batch 0, Channel 2, Height (1) x Width (1)
        3.0f,

        // Batch 0, Channel 3, Height (1) x Width (1)
        4.0f,

        // Batch 0, Channel 4, Height (1) x Width (1)
        5.0f,

        // Batch 0, Channel 5, Height (1) x Width (1)
        6.0f,

        // Batch 0, Channel 6, Height (1) x Width (1)
        7.0f,

        // Batch 0, Channel 7, Height (1) x Width (1)
        8.0f,

        // Batch 0, Channel 8, Height (1) x Width (1)
        9.0f,

        // Batch 0, Channel 9, Height (1) x Width (1)
        10.0f
    };
    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)
    const float approxInvL2Norm = 0.050964719f;
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (1)
        1.0f * approxInvL2Norm,
        2.0f * approxInvL2Norm,
        3.0f * approxInvL2Norm,
        4.0f * approxInvL2Norm,
        5.0f * approxInvL2Norm,
        6.0f * approxInvL2Norm,
        7.0f * approxInvL2Norm,
        8.0f * approxInvL2Norm,
        9.0f * approxInvL2Norm,
        10.0f * approxInvL2Norm
    };


    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
6426
Ferran Balaguere52211e2019-06-17 12:23:52 +01006427LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6428 armnn::IWorkloadFactory& workloadFactory,
6429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6430 const armnn::DataLayout layout)
6431{
6432 // Dummy descriptor to get the default value of epsilon.
6433 armnn::L2NormalizationDescriptor descriptor;
6434
6435 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6436 layout, descriptor.m_Eps);
6437}
6438
6439LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6440 armnn::IWorkloadFactory& workloadFactory,
6441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6442 const armnn::DataLayout layout)
6443{
6444 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6445 layout, 1e-9f);
6446}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006447
6448LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006449 armnn::IWorkloadFactory& workloadFactory,
6450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006451 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006452{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006453 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006454}
6455
6456LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6457 armnn::IWorkloadFactory& workloadFactory,
6458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6459 const armnn::DataLayout layout)
6460{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006461 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006462 layout);
6463}
6464
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006465LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6466 armnn::IWorkloadFactory& workloadFactory,
6467 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6468 const armnn::DataLayout layout)
6469{
6470 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6471 1.f/128, 128, layout);
6472}
6473
// L2-normalises a 1x2x1x5 tensor across the channel dimension: each element is
// divided by the L2 norm of the two channel values sharing its spatial position
// (computed by the CalcInvL2Norm helper).
// 'scale'/'offset' quantise the input, 'outScale'/'outOffset' the output;
// both pairs are ignored for float data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 5
    // Height: 1
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 1;
    unsigned int width = 5;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f, 3.0f, 5.0f, 7.0f, 9.0f,

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (1) x Width (5)
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),

        // Batch 0, Channel 1, Height (1) x Width (5)
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00006523
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006524LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006525 armnn::IWorkloadFactory& workloadFactory,
6526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006527 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006528{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006529 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6530 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006531}
6532
6533LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6534 armnn::IWorkloadFactory& workloadFactory,
6535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6536 const armnn::DataLayout layout)
6537{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006538 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006539 layout);
6540}
6541
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006542LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6543 armnn::IWorkloadFactory& workloadFactory,
6544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6545 const armnn::DataLayout layout)
6546{
6547 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6548 1.f/128, 128, layout);
6549}
6550
// L2-normalises a 1x2x4x3 tensor across the channel dimension: each element is
// divided by the L2 norm of the two channel values sharing its spatial position
// (computed by the CalcInvL2Norm helper).
// 'scale'/'offset' quantise the input, 'outScale'/'outOffset' the output;
// both pairs are ignored for float data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00006620
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006621LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006622 armnn::IWorkloadFactory& workloadFactory,
6623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006624 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006625{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006626 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6627 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006628}
6629
6630LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6631 armnn::IWorkloadFactory& workloadFactory,
6632 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6633 const armnn::DataLayout layout)
6634{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006635 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006636 layout);
6637}
6638
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006639LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6640 armnn::IWorkloadFactory& workloadFactory,
6641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6642 const armnn::DataLayout layout)
6643{
6644 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6645 1.f/128, 128, layout);
6646}
6647
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006648template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6649LayerTestResult<T, 4> L2Normalization4dTestCommon(
6650 armnn::IWorkloadFactory& workloadFactory,
6651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006652 float scale,
6653 int32_t offset,
6654 float outScale,
6655 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006656 const armnn::DataLayout layout)
6657{
6658 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006659 // Height: 4
6660 // Channels: 3
6661 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006662 unsigned int numberOfBatches = 2;
6663 unsigned int numberOfChannels = 3;
6664 unsigned int height = 4;
6665 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006666
Nina Drozdd41b2592018-11-19 13:03:36 +00006667 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006668 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006669 std::vector<float> inputValues
6670 {
6671 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006672 235.0f, 46.0f, 178.0f,
6673 100.0f, 123.0f, 19.0f,
6674 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006675 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00006676
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006677 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006678 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006679 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00006680 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006681 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00006682
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006683 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006684 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00006685 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006686 12.0f, 209.0f, 200.0f,
6687 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00006688
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006689 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006690 67.0f, 90.0f, 49.0f,
6691 7.0f, 163.0f, 18.0f,
6692 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00006693 247.0f, 59.0f, 189.0f,
6694
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006695 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006696 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006697 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00006698 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006699 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00006700
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006701 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006702 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00006703 115.0f, 116.0f, 238.0f,
6704 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006705 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006706 };
6707 std::vector<float> expectedOutputValues
6708 {
6709 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006710 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006711 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006712 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6713 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6714 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006715 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006716 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006717 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006718 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006719 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006720 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006721 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006722
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006723 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006724 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006725 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006726 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006727 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006728 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006729 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006730 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6731 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6732 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006733 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6734 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6735 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006736
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006737 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006738 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006739 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6740 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6741 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006742 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006743 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006744 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006745 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6746 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006747 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6748 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6749 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006750
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006751 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006752 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6753 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6754 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6755 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006756 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006757 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6758 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006759 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6760 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6761 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006762 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006763 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6764
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006765 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006766 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6767 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6768 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006769 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006770 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6771 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6772 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6773 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006774 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6775 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006776 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006777 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006778
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006779 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006780 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006781 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6782 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6783 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6784 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6785 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6786 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006787 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006788 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006789 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006790 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006791 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006792 };
telsoa014fcda012018-03-09 14:13:49 +00006793
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006794 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6795 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006796}
6797
6798LayerTestResult<float, 4> L2Normalization4dTest(
6799 armnn::IWorkloadFactory& workloadFactory,
6800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6801 const armnn::DataLayout layout)
6802{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006803 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6804 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006805}
6806
6807LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
6808 armnn::IWorkloadFactory& workloadFactory,
6809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6810 const armnn::DataLayout layout)
6811{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006812 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006813 layout);
telsoa014fcda012018-03-09 14:13:49 +00006814}
6815
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006816LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
6817 armnn::IWorkloadFactory& workloadFactory,
6818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6819 const armnn::DataLayout layout)
6820{
6821 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6822 1.f/128, 128, layout);
6823}
6824
// Exercises the Constant layer: a workload with no inputs whose single output must
// reproduce the tensor baked into the ConstantQueueDescriptor. The expected output
// is therefore the constant data itself, copied through unchanged.
// @param workloadFactory  backend factory used to create handles and the workload.
// @param memoryManager    unused by this implementation (kept for signature parity
//                         with the other test impls in this file).
// @param qScale/qOffset   quantization parameters applied to both tensor infos when
//                         ArmnnType is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // NCHW shape [2, 3, 4, 3]; output dimensions mirror the input since the
    // constant is passed through verbatim.
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // (Redundant with the TensorInfo constructor arguments above, but harmless.)
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // The constant payload, quantized into T via QuantizedVector.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    // A Constant layer must emit its stored tensor unmodified.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data lives in a CPU-side scoped handle referenced by the descriptor.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // No inputs are added: Constant is a source workload with a single output.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006921LayerTestResult<float, 4> ConstantTest(
6922 armnn::IWorkloadFactory& workloadFactory,
6923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006925 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006926}
6927
Nina Drozd58ef2c62019-05-16 12:09:18 +01006928LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6929 armnn::IWorkloadFactory& workloadFactory,
6930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6931{
6932 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6933}
6934
6935LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006936 armnn::IWorkloadFactory& workloadFactory,
6937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006938{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006939 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006940}
6941
// Concatenates two QAsymm8 tensors along the channel axis where the inputs carry
// *different* quantization parameters, so the backend must requantize. Input1
// shares scale/offset with the output (its values pass through unchanged); input2
// uses a different scale/offset and is requantized into the output space (the
// 176..197 block of expected values below).
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output [3,6,3] is the two inputs stacked along channels: 2 + 1.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels: input1 verbatim; third channel: input2 requantized
    // from (scale2, offset2) into (outputScale, outputOffset).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // If the backend supports sub-tensors, inputs are views into the output buffer
    // at the view origins; otherwise they are standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7084
// Concatenates two QAsymm8 tensors ([2,6,3] and [1,6,3]) along the channel axis
// into a [3,6,3] output. All tensors share identical quantization parameters, so
// the Concat is a pure byte copy and the expected output is the two inputs
// stacked back to back.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // If the backend supports sub-tensors, the inputs are created as views into
    // the output buffer at the view origins; otherwise as standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
7220
// Concatenates two QSymm16 tensors ([2,6,3] and [1,6,3]) along the channel axis
// into a [3,6,3] output. All tensors share identical quantization parameters, so
// the Concat is a pure element copy.
// NOTE(review): the element type used here is uint16_t although the DataType is
// QuantisedSymm16 (signed 16-bit); the bit patterns are identical for these small
// positive values — confirm this is intentional.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // If the backend supports sub-tensors, the inputs are created as views into
    // the output buffer at the view origins; otherwise as standalone tensors.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007353
namespace
{
// Runs an Addition workload on two quantized 4d tensors and compares against the
// caller-supplied expected quantized output. The DataType is selected from T:
// uint8_t -> QAsymm8, anything else (int16_t in practice) -> QSymm16.
// Each tensor carries its own quantization scale/offset; memoryManager is unused
// by this helper (kept for signature parity with other helpers in this file).
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the element type onto the corresponding quantized armnn DataType.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Two inputs, one output: standard Addition workload wiring.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7423
// QAsymm8 addition with scale 7 and offset 3 on all tensors. The trailing
// comments show the dequantized values, i.e. (q - 3) * 7; outputs that would
// dequantize above the representable maximum saturate at q = 255.
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    std::vector<uint8_t> output(
    {
        81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 3,
                                      shape1, input1, 7.0f, 3,
                                      shape0, output, 7.0f, 3);
}
7455
// QSymm16 addition with scale 7 and offset 0 on all tensors. The trailing
// comments show the dequantized values, i.e. q * 7. (The previous comments were
// copied from the uint8 test and computed with offset 3; they are corrected here.
// No value comes anywhere near the int16 range, so nothing saturates.)
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392,  784
        203,  28, 252, 168, 245,  91 // 1421, 196, 1764, 1176, 1715,  637
    });

    std::vector<int16_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
        126, 161,  63,  21, 105, 126 // 882, 1127,  441,  147,  735,  882
    });

    std::vector<int16_t> output(
    {
        84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7487
namespace
{
// Builds and executes a Multiplication workload on two 4-D tensors and returns
// the actual output next to the expected output for comparison by the caller.
//
// ArmnnType selects the armnn::DataType of all three tensors; T is the matching
// C++ storage type. Each input and the output carries its own quantization
// scale/offset, applied to its TensorInfo below.
// NOTE(review): memoryManager is accepted for signature consistency with the
// callers but is not referenced in this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Attach the per-tensor quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // Record the expected output up front; result.output is filled in after
    // the workload has run.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate backing storage before copying the input data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure runs after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7553
// Quantized (QAsymm8) multiplication where the output scale/offset are chosen
// so that several products fall outside the uint8 range and clamp.
LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 1;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;
    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right ((q - 1) * 4).
    std::vector<uint8_t> input0({
        62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
    });

    // See dequantized values to the right ((q + 2) * 3).
    std::vector<uint8_t> input1({
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
    });

    // See dequantized values (elementwise products) to the right.
    std::vector<uint8_t> output(
    {
        64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
    });

    // Scale/offset chosen to have output values out of range.
    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
                                                                              memoryManager,
                                                                              shape,
                                                                              input0,
                                                                              4.0f,
                                                                              1,
                                                                              shape,
                                                                              input1,
                                                                              3.0f,
                                                                              -2,
                                                                              shape,
                                                                              output,
                                                                              1366.255f,
                                                                              -5);
}
7599
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007600LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7601 armnn::IWorkloadFactory& workloadFactory,
7602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007603{
7604 const unsigned int shape0[] = { 1, 2, 2, 3 };
7605 const unsigned int shape1[] = { 1, 1, 1, 1 };
7606
7607 std::vector<uint8_t> input0({
7608 1, 2, 3, 4, 5, 6,
7609 7, 8, 9, 10, 11, 12
7610 });
7611
7612 std::vector<uint8_t> input1({2});
7613
7614 std::vector<uint8_t> output({
7615 2, 4, 6, 8, 10, 12,
7616 14, 16, 18, 20, 22, 24
7617 });
7618
Sadik Armagan2999a022019-04-09 14:20:12 +01007619 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7620 memoryManager,
7621 shape0,
7622 input0,
7623 1.0f,
7624 0,
7625 shape1,
7626 input1,
7627 1.0f,
7628 0,
7629 shape0,
7630 output,
7631 1.0f,
7632 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007633}
7634
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007635LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7636 armnn::IWorkloadFactory& workloadFactory,
7637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007638{
7639 const unsigned int shape0[] = { 1, 2, 2, 3 };
7640 const unsigned int shape1[] = { 1, 1, 1, 3 };
7641
7642 std::vector<uint8_t> input0({
7643 1, 2, 3, 4, 5, 6,
7644 7, 8, 9, 10, 11, 12
7645 });
7646
7647 std::vector<uint8_t> input1({1, 2, 3});
7648
7649 std::vector<uint8_t> output({
7650 1, 4, 9, 4, 10, 18,
7651 7, 16, 27, 10, 22, 36
7652 });
7653
Sadik Armagan2999a022019-04-09 14:20:12 +01007654 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7655 memoryManager,
7656 shape0,
7657 input0,
7658 1.0f,
7659 0,
7660 shape1,
7661 input1,
7662 1.0f,
7663 0,
7664 shape0,
7665 output,
7666 1.0f,
7667 0);
7668}
7669
7670LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7671 armnn::IWorkloadFactory& workloadFactory,
7672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7673{
7674 const unsigned int shape[] = { 1, 2, 2, 3 };
7675
7676 std::vector<int16_t> input0(
7677 {
7678 6, 7, 8, 9, 10, 11,
7679 12, 13, 14, 15, 16, 17
7680 });
7681
7682 std::vector<int16_t> input1(
7683 {
7684 1, 2, 3, 4, 5, 6,
7685 7, 8, 9, 10, 11, 12
7686 });
7687
7688 std::vector<int16_t> output(
7689 {
7690 6, 14, 24, 36, 50, 66,
7691 84, 104, 126, 150, 176, 204
7692 });
7693
7694 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7695 memoryManager,
7696 shape,
7697 input0,
7698 1.0f,
7699 0,
7700 shape,
7701 input1,
7702 1.0f,
7703 0,
7704 shape,
7705 output,
7706 1.0f,
7707 0);
7708}
7709
7710LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7711 armnn::IWorkloadFactory& workloadFactory,
7712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7713{
7714 const unsigned int shape0[] = { 1, 2, 2, 3 };
7715 const unsigned int shape1[] = { 1, 1, 1, 1 };
7716
7717 std::vector<int16_t> input0(
7718 {
7719 1, 2, 3, 4, 5, 6,
7720 7, 8, 9, 10, 11, 12
7721 });
7722
7723 std::vector<int16_t> input1({2});
7724
7725 std::vector<int16_t> output(
7726 {
7727 2, 4, 6, 8, 10, 12,
7728 14, 16, 18, 20, 22, 24
7729 });
7730
7731 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7732 memoryManager,
7733 shape0,
7734 input0,
7735 1.0f,
7736 0,
7737 shape1,
7738 input1,
7739 1.0f,
7740 0,
7741 shape0,
7742 output,
7743 1.0f,
7744 0);
7745}
7746
7747LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7748 armnn::IWorkloadFactory& workloadFactory,
7749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7750{
7751 const unsigned int shape0[] = { 1, 2, 2, 3 };
7752 const unsigned int shape1[] = { 1, 1, 1, 3 };
7753
7754 std::vector<int16_t> input0(
7755 {
7756 1, 2, 3, 4, 5, 6,
7757 7, 8, 9, 10, 11, 12
7758 });
7759
7760 std::vector<int16_t> input1({1, 2, 3});
7761
7762 std::vector<int16_t> output(
7763 {
7764 1, 4, 9, 4, 10, 18,
7765 7, 16, 27, 10, 22, 36
7766 });
7767
7768 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7769 memoryManager,
7770 shape0,
7771 input0,
7772 1.0f,
7773 0,
7774 shape1,
7775 input1,
7776 1.0f,
7777 0,
7778 shape0,
7779 output,
7780 1.0f,
7781 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007782}
telsoa014fcda012018-03-09 14:13:49 +00007783
namespace
{
// Builds and executes a Subtraction workload (values0 - values1, with
// broadcasting as permitted by the shapes) and returns the actual output next
// to the expected output for comparison by the caller.
//
// ArmnnType selects the armnn::DataType of all three tensors; T is the matching
// C++ storage type. Each input and the output carries its own quantization
// scale/offset.
// NOTE(review): memoryManager is accepted for signature consistency with the
// callers but is not referenced in this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Attach the per-tensor quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // Record the expected output up front; result.output is filled in after
    // the workload has run.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate backing storage before copying the input data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure runs after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7849
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007850LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7851 armnn::IWorkloadFactory& workloadFactory,
7852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007853{
7854 const unsigned int shape0[] = { 1, 1, 2, 2 };
7855 const unsigned int shape1[] = { 1, 1, 2, 2 };
7856
7857 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7858 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7859 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7860
Sadik Armagan2999a022019-04-09 14:20:12 +01007861 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7862 memoryManager,
7863 shape0, input0, 0.5f, 2,
7864 shape1, input1, 1.0f, 0,
7865 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007866}
7867
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007868LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7869 armnn::IWorkloadFactory& workloadFactory,
7870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007871{
7872 const unsigned int shape0[] = { 1, 1, 2, 2 };
7873 const unsigned int shape1[] = { 1, 1, 1, 1 };
7874
7875 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7876 std::vector<uint8_t> input1({ 2 });
7877 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7878
Sadik Armagan2999a022019-04-09 14:20:12 +01007879 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7880 memoryManager,
7881 shape0, input0, 0.5f, 2,
7882 shape1, input1, 1.0f, 0,
7883 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007884}
7885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007886LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7887 armnn::IWorkloadFactory& workloadFactory,
7888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007889{
7890 const unsigned int shape0[] = { 1, 1, 2, 2 };
7891 const unsigned int shape1[] = { 1, 1, 2, 1 };
7892
7893 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7894 std::vector<uint8_t> input1({ 2, 1 });
7895 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7896
Sadik Armagan2999a022019-04-09 14:20:12 +01007897 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7898 memoryManager,
7899 shape0, input0, 1.0f, 0,
7900 shape1, input1, 1.0f, 0,
7901 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007902}
7903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007904LayerTestResult<float, 4> SubtractionTest(
7905 armnn::IWorkloadFactory& workloadFactory,
7906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007907{
7908 const unsigned int shape0[] = { 1, 1, 2, 2 };
7909 const unsigned int shape1[] = { 1, 1, 2, 2 };
7910
7911 std::vector<float> input0({ 1, 2, 3, 4 });
7912 std::vector<float> input1({ 1, -1, 0, 2 });
7913 std::vector<float> output({ 0, 3, 3, 2 });
7914
Sadik Armagan2999a022019-04-09 14:20:12 +01007915 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7916 memoryManager,
7917 shape0, input0, 1.0f, 0,
7918 shape1, input1, 1.0f, 0,
7919 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007920}
7921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007922LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7923 armnn::IWorkloadFactory& workloadFactory,
7924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007925{
7926 const unsigned int shape0[] = { 1, 1, 2, 2 };
7927 const unsigned int shape1[] = { 1, 1, 1, 1 };
7928
7929 std::vector<float> input0({ 1, 2, 3, 4 });
7930 std::vector<float> input1({ 10 });
7931 std::vector<float> output({ -9, -8, -7, -6 });
7932
Sadik Armagan2999a022019-04-09 14:20:12 +01007933 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7934 memoryManager,
7935 shape0, input0, 1.0f, 0,
7936 shape1, input1, 1.0f, 0,
7937 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007938}
7939
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007940LayerTestResult<float, 4> SubtractionBroadcastTest(
7941 armnn::IWorkloadFactory& workloadFactory,
7942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007943{
7944 const unsigned int shape0[] = { 1, 1, 2, 2 };
7945 const unsigned int shape1[] = { 1, 1, 1, 2 };
7946
7947 std::vector<float> input0({ 1, 2, 3, 4 });
7948 std::vector<float> input1({ 10, -5 });
7949 std::vector<float> output({ -9, 7, -7, 9 });
7950
Sadik Armagan2999a022019-04-09 14:20:12 +01007951 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7952 memoryManager,
7953 shape0, input0, 1.0f, 0,
7954 shape1, input1, 1.0f, 0,
7955 shape0, output, 1.0f, 0);
7956}
7957
7958LayerTestResult<int16_t, 4> SubtractionInt16Test(
7959 armnn::IWorkloadFactory& workloadFactory,
7960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7961{
7962 const unsigned int shape0[] = { 1, 1, 2, 2 };
7963 const unsigned int shape1[] = { 1, 1, 2, 2 };
7964
7965 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7966 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7967 std::vector<int16_t> output({ 3, 3, 5, 5 });
7968
7969 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7970 memoryManager,
7971 shape0, input0, 0.5f, 0,
7972 shape1, input1, 1.0f, 0,
7973 shape0, output, 1.0f, 0);
7974}
7975
7976LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7977 armnn::IWorkloadFactory& workloadFactory,
7978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7979{
7980 const unsigned int shape0[] = { 1, 1, 2, 2 };
7981 const unsigned int shape1[] = { 1, 1, 1, 1 };
7982
7983 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7984 std::vector<int16_t> input1({ 2 });
7985 std::vector<int16_t> output({ 3, 4, 5, 6 });
7986
7987 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7988 memoryManager,
7989 shape0, input0, 0.5f, 0,
7990 shape1, input1, 1.0f, 0,
7991 shape0, output, 1.0f, 0);
7992}
7993
7994LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7995 armnn::IWorkloadFactory& workloadFactory,
7996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7997{
7998 const unsigned int shape0[] = { 1, 1, 2, 2 };
7999 const unsigned int shape1[] = { 1, 1, 2, 1 };
8000
8001 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8002 std::vector<int16_t> input1({ 2, 1 });
8003 std::vector<int16_t> output({ 8, 11, 12, 15 });
8004
8005 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8006 memoryManager,
8007 shape0, input0, 1.0f, 0,
8008 shape1, input1, 1.0f, 0,
8009 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008010}
8011
// Batch normalisation of a Float32 tensor in NCHW layout with quantization
// parameters (0.f, 0).
// NOTE(review): the expected values depend on the gamma/beta/mean/variance
// constants set up inside BatchNormTestImpl — confirm against that helper if
// these tables are ever changed.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
8052
// Same data as BatchNormTest but laid out in NHWC; verifies that the data
// layout parameter is honoured.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
8097
// QAsymm8 batch normalisation in NCHW layout; the input/expected tables are
// given in float and quantized by the test impl using scale 1/20, offset 50.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8138
// QAsymm8 batch normalisation with the same data as BatchNormUint8Test but in
// NHWC layout (quantization: scale 1/20, offset 50).
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
            (workloadFactory, memoryManager,
             inputOutputShape, inputValues, expectedOutputValues,
             1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8183
// QSymm16 batch normalisation in NCHW layout; same float tables as the float
// and uint8 variants, quantized by the test impl with scale 1/20, offset 50.
LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8224
// QSymm16 batch normalisation with the same data as BatchNormInt16Test but in
// NHWC layout (quantization: scale 1/20, offset 50).
LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
            (workloadFactory, memoryManager,
             inputOutputShape, inputValues, expectedOutputValues,
             1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8269
Nina Drozd58ef2c62019-05-16 12:09:18 +01008270LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008271 armnn::IWorkloadFactory& workloadFactory,
8272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008273{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008274 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008275}
8276
Nina Drozd58ef2c62019-05-16 12:09:18 +01008277LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8278 armnn::IWorkloadFactory& workloadFactory,
8279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8280{
8281 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8282}
8283
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008284LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8285 armnn::IWorkloadFactory& workloadFactory,
8286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008287{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008288 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008289}
8290
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008291LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8292 armnn::IWorkloadFactory& workloadFactory,
8293 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008294{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008295 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008296}
8297
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008298LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8299 armnn::IWorkloadFactory& workloadFactory,
8300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008301{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008302 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008303}
8304
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008305LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8306 armnn::IWorkloadFactory& workloadFactory,
8307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008308{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008309 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8310 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008311}
8312
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008313LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8314 armnn::IWorkloadFactory& workloadFactory,
8315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008316{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008317 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8318 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008319}
8320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008321LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8322 armnn::IWorkloadFactory& workloadFactory,
8323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008324{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008325 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008326}
8327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008328LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8329 armnn::IWorkloadFactory& workloadFactory,
8330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008331{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008332 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008333}
8334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008335LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8336 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8338 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008339{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008340 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8341 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008342}
8343
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008344LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8345 armnn::IWorkloadFactory& workloadFactory,
8346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008348 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008349}
8350
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008351LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8352 armnn::IWorkloadFactory& workloadFactory,
8353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008354{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008355 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8356 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008357}
8358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008359LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8360 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8362 bool useSubtensor)
8363{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008364 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8365 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008366}
8367
8368LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8369 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008371{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008372 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008373}
8374
8375LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8376 armnn::IWorkloadFactory& workloadFactory,
8377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8378{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008379 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008380}
8381
8382LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8383 armnn::IWorkloadFactory& workloadFactory,
8384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8385{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008386 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008387}
8388
8389LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8390 armnn::IWorkloadFactory& workloadFactory,
8391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8392{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008393 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8394 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008395}
8396
8397LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8398 armnn::IWorkloadFactory& workloadFactory,
8399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008401 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8402 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008403}
8404
8405LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8406 armnn::IWorkloadFactory& workloadFactory,
8407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8408{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008409 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8410 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008411}
8412
8413LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8414 armnn::IWorkloadFactory& workloadFactory,
8415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8416{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008417 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8418 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008419}
8420
8421LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8422 armnn::IWorkloadFactory& workloadFactory,
8423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8424 bool useSubtensor)
8425{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008426 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8427 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008428}
8429
// Max pooling, 2x2 kernel / 2x2 stride, Float32; forceNoPadding disables padding.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8438
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008439LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8440 armnn::IWorkloadFactory& workloadFactory,
8441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8442 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008443{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008444 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008445 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008446}
8447
// Max pooling, 2x2 kernel / 2x2 stride, QSymm16; uses the impl's default quantisation.
LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8456
// Max pooling, 3x3 kernel / 2x4 stride, Float32.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8465
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008466LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8467 armnn::IWorkloadFactory& workloadFactory,
8468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8469 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008470{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008471 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008472 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008473}
8474
// Max pooling, 3x3 kernel / 2x4 stride, QSymm16; uses the impl's default quantisation.
LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, forceNoPadding);
}
8483
// Simple max pooling, Float32, parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8491
// Simple max pooling, QAsymm8, parameterised on data layout (NCHW/NHWC).
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
8499
// Simple max pooling, QSymm16, parameterised on data layout (NCHW/NHWC).
LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
// Max pooling with PaddingMethod::IgnoreValue, Float32.
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8513
8514LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8515 armnn::IWorkloadFactory& workloadFactory,
8516 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8517{
8518 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8519 workloadFactory, memoryManager, 1.0f, -5);
8520}
8521
// Max pooling with PaddingMethod::IgnoreValue, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8529
// Max pooling, 3x3 kernel, ignore-value padding, Float32.
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8536
8537LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8538 armnn::IWorkloadFactory& workloadFactory,
8539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8540{
8541 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8542 workloadFactory, memoryManager, 1.0f, -5);
8543}
8544
// Max pooling, 3x3 kernel, ignore-value padding, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8552
// Simple average pooling, Float32, parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8560
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008561LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8562 armnn::IWorkloadFactory& workloadFactory,
8563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008564 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008565{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008566 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008567 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008568}
8569
// Simple average pooling, QSymm16, parameterised on data layout.
LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, dataLayout);
}
8578
// Average pooling, 3x2 kernel / 2x2 stride, ignore-value padding, Float32.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8587
// Average pooling over large tensors, Float32.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008595LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8596 armnn::IWorkloadFactory& workloadFactory,
8597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008598{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008599 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8600 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008601}
8602
// Average pooling over large tensors, QSymm16.
LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
// Average pooling with PaddingMethod::IgnoreValue, Float32.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8616
// Average pooling with PaddingMethod::IgnoreValue, QAsymm8 (impl's default quantisation).
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8624
// Average pooling with PaddingMethod::IgnoreValue, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8632
// Average pooling, ignore-value padding method but zero actual padding, Float32.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}
8640
// Average pooling, ignore-value padding method but zero actual padding, QAsymm8.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8648
// Average pooling, ignore-value padding method but zero actual padding, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8656
// Average pooling, 3x3 kernel, ignore-value padding, Float32.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8663
// Average pooling, 3x3 kernel, ignore-value padding, QAsymm8 (default quantisation).
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8671
// Average pooling, 3x3 kernel, ignore-value padding, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8679
// Simple L2 pooling, Float32, parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8687
// Simple L2 pooling, QAsymm8, parameterised on data layout (NCHW/NHWC).
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
8695
// Simple L2 pooling, QSymm16, parameterised on data layout (NCHW/NHWC).
LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}
8703
// L2 pooling, 3x3 kernel / stride 1, Float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8710
// L2 pooling, 3x3 kernel / stride 1, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8717
// L2 pooling, 3x3 kernel / stride 1, QSymm16.
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8724
// L2 pooling, 3x3 kernel / stride 3, Float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8731
// L2 pooling, 3x3 kernel / stride 3, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8738
// L2 pooling, 3x3 kernel / stride 3, QSymm16.
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
// L2 pooling, 3x3 kernel / stride 4, Float32.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8751
// L2 pooling, 3x3 kernel / stride 4, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8758
// L2 pooling, 3x3 kernel / stride 4, QSymm16.
LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8765
// L2 pooling, 7x7 kernel, Float32.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8772
// L2 pooling, 7x7 kernel, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8779
// L2 pooling, 7x7 kernel, QSymm16.
LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8786
// L2 pooling, 9x9 kernel, Float32.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8793
// L2 pooling, 9x9 kernel, QAsymm8.
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8800
// L2 pooling, 9x9 kernel, QSymm16.
LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
// L2 pooling with PaddingMethod::IgnoreValue, Float32.
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8813
// L2 pooling with PaddingMethod::IgnoreValue, QAsymm8.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8820
// L2 pooling with PaddingMethod::IgnoreValue, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8827
// L2 pooling, 3x3 kernel, ignore-value padding, Float32.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8834
// L2 pooling, 3x3 kernel, ignore-value padding, QAsymm8.
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8841
// L2 pooling, 3x3 kernel, ignore-value padding, QSymm16.
LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8848
// Pooling with asymmetric padding and a non-square kernel, Float32.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8855
// Pooling with asymmetric padding and a non-square kernel, QAsymm8.
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8862
// Pooling with asymmetric padding and a non-square kernel, QSymm16.
LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
8869
// Runs the given pooling algorithm on two workload factories and compares
// their Float32 results (reference vs backend under test).
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
8879
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008880LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8881 armnn::IWorkloadFactory& workloadFactory,
8882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8883 armnn::IWorkloadFactory& refWorkloadFactory,
8884 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008885{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008886 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008887 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008888}
8889
Teresa Charlin0434df62019-06-06 13:40:35 +01008890LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
8891 armnn::IWorkloadFactory& workloadFactory,
8892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8893 armnn::IWorkloadFactory& refWorkloadFactory,
8894 armnn::PoolingAlgorithm poolingType)
8895{
8896 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8897 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
8898}
8899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008900LayerTestResult<float, 2> FullyConnectedLargeTest(
8901 armnn::IWorkloadFactory& workloadFactory,
8902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8903 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008905 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008906}
8907
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008908LayerTestResult<float, 4> SimplePermuteFloat32Test(
8909 armnn::IWorkloadFactory& workloadFactory,
8910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008911{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008912 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008913};
8914
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008915LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8916 armnn::IWorkloadFactory& workloadFactory,
8917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008918{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008919 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008920};
surmeh01bceff2f2018-03-29 16:29:27 +01008921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008922LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8923 armnn::IWorkloadFactory& workloadFactory,
8924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008925{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008926 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008927};
8928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008929LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8930 armnn::IWorkloadFactory& workloadFactory,
8931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008932{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008933 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008934};
8935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008936LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8937 armnn::IWorkloadFactory& workloadFactory,
8938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008939{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008940 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008941};
8942
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008943LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
8944 armnn::IWorkloadFactory& workloadFactory,
8945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008946{
8947 // Create Initial Tensor
8948 // 1, 2, 3
8949 // 4, 5, 6
8950 // 7, 8, 9
8951
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008952 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
8953 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008954
8955 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
8956 {1, 2, 3,
8957 4, 5, 6,
8958 7, 8, 9
8959 });
8960
8961 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
8962 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
8963 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
8964 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
8965
8966 // Apply MaxPool poolSize = 1x1, stride=2x2
8967 // Result =
8968 // 1, 3
8969 // 7, 9
8970 armnn::Pooling2dDescriptor descriptor;
8971 descriptor.m_PoolHeight = 1;
8972 descriptor.m_PoolWidth = 1;
8973 descriptor.m_StrideX = 2;
8974 descriptor.m_StrideY = 2;
8975 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
8976
8977 armnn::Pooling2dQueueDescriptor queueDescriptor;
8978 queueDescriptor.m_Parameters = descriptor;
8979 armnn::WorkloadInfo workloadInfo;
8980 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
8981 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
8982
8983 // Create the MaxPool
8984 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
8985
8986 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
8987 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
8988 boost::multi_array<float, 4> resultMaxPool;
8989 resultMaxPool.resize(shape);
8990
8991
8992 // Create addition with another tensor the same size
8993 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
8994 // with the initial tensor.
8995 // 12, 16
8996 // 24, 28
8997
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008998 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
8999 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009000
9001 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
9002 {12, 16,
9003 24, 28,
9004 });
9005
9006 // Expected output tensor after MaxPool and Addition.
9007 LayerTestResult<float,4> addRet(addOutputTensorInfo);
9008 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
9009 {
9010 13, 19,
9011 31, 37
9012 }));
9013
9014 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
9015 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
9016
9017 armnn::AdditionQueueDescriptor data;
9018 armnn::WorkloadInfo info;
9019
9020 // Add the output of the MaxPool and the new tensor
9021 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
9022 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
9023 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
9024
9025 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
9026
9027 poolingInputHandle->Allocate();
9028 poolingOutputHandle->Allocate();
9029 addInputHandle->Allocate();
9030 addOutputHandle->Allocate();
9031
9032 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
9033 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
9034
9035 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
9036 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
9037
Derek Lambertif30f7d32019-04-09 10:25:02 +01009038 workload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009039 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01009040 addWorkload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009041 addWorkload->Execute();
9042
9043 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
9044
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009045 return addRet;
9046}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009047
// SpaceToBatchNd test wrappers (default data layout): each function below
// simply instantiates the shared templated implementation for one data type.

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// QAsymm8 variants of the same four tests.

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9103
// SpaceToBatchNd NHWC test wrappers: same pattern as above, but each delegates
// to the NHWC variant of the shared templated implementation.

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// QAsymm8 variants of the same four NHWC tests.

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009159
// SpaceToBatchNd QSymm16 test wrappers.
// NOTE(review): these are named "*Uint16*" but instantiate QuantisedSymm16 and
// return int16_t — presumably a historical misnomer; renaming would require a
// matching header change, so the names are kept as-is.

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9215
Keith Davisa57eccb2019-06-14 17:33:22 +01009216
// SpaceToDepth test wrappers. The NCHW variants pass the layout explicitly;
// the NHWC variants rely on the common implementation's default layout
// (presumably NHWC — confirm against SpaceToDepthTestImpl).

LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthFloatTest<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthFloatTest<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}
9254
namespace {

// Shared driver for the BatchToSpaceNd tests below: builds input/output tensor
// infos, runs a single BatchToSpaceNd workload on the given factory and
// returns the actual vs expected result.
//
// Note: the element data type is inferred from T — uint8_t maps to
// QuantisedAsymm8, everything else to Float32 — so this helper only supports
// those two types. scale/offset apply to both input and output quantisation.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Map T to an armnn data type (only uint8_t and float are expected here).
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the single BatchToSpaceNd workload.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
9314
// BatchToSpaceNd, NHWC: 4 batches of 2x2x1 rearranged into a single 4x4x1
// image with block shape {2, 2} and no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9360
// BatchToSpaceNd, NHWC: 4 batches of a single element rearranged into one
// 2x2x1 image with block shape {2, 2} and no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input({
        // One value per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9382
// BatchToSpaceNd, NHWC: 4 batches of 1x1x3 (multi-channel) rearranged into one
// 2x2x3 image with block shape {2, 2}; input order already matches the output.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9401
// BatchToSpaceNd, NHWC: 8 batches of 1x3x1, block shape {2, 2} with a crop of
// 2 rows at the start of the width dimension — the leading zeros in each input
// row are cropped away, leaving two 2x4x1 output batches.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9434
// BatchToSpaceNd, NCHW: 4 batches of 3x1x1 rearranged into one 3-channel 2x2
// image with block shape {2, 2} and no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009465
// BatchToSpaceNd, NCHW: 4 batches of a single element rearranged into one
// 1-channel 2x2 image with block shape {2, 2} and no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One value per batch (4 batches of 1x1x1).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9487
9488LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009489 armnn::IWorkloadFactory& workloadFactory,
9490 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009491{
9492 const unsigned int inputShape[] = {4, 3, 1, 1};
9493 const unsigned int outputShape[] = {1, 3, 2, 2};
9494
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009495 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009496
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009497 std::vector<float> expectedOutput({
9498 // Batch 0, Channel 0, Height (2) x Width (2)
9499 1.0f, 7.0f,
9500 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009501
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009502 // Batch 0, Channel 1, Height (2) x Width (2)
9503 3.0f, 9.0f,
9504 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009505
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009506 // Batch 0, Channel 2, Height (2) x Width (2)
9507 5.0f, 11.0f,
9508 6.0f, 12.0f,
9509 });
Mike Kelly831faed2018-11-28 11:52:08 +00009510
9511 std::vector<unsigned int> blockShape({2, 2});
9512 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9513
9514 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9515 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9516 crops, outputShape, expectedOutput);
9517}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009518
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009519LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9520 armnn::IWorkloadFactory& workloadFactory,
9521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009522{
9523 const unsigned int inputShape[] = {4, 2, 2, 1};
9524 const unsigned int outputShape[] = {1, 4, 4, 1};
9525
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009526 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9527 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009528
9529 std::vector<unsigned int> blockShape({2, 2});
9530 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9531
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009532 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9533 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009534}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009535
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009536LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
9537 armnn::IWorkloadFactory& workloadFactory,
9538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9539{
9540 const unsigned int inputShape[] = {4, 1, 1, 1};
9541 const unsigned int outputShape[] = {1, 2, 2, 1};
9542
9543 std::vector<uint8_t> input({
9544 // Batch 0, Height 0, Width (2) x Channel (1)
9545 1, 2, 3, 4
9546 });
9547
9548 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9549
9550 std::vector<unsigned int> blockShape({2, 2});
9551 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9552
9553 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9554 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9555 crops, outputShape, expectedOutput);
9556}
9557
9558LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9559 armnn::IWorkloadFactory& workloadFactory,
9560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9561{
9562 const unsigned int inputShape[] = {4, 1, 1, 3};
9563 const unsigned int outputShape[] = {1, 2, 2, 3};
9564
9565 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9566
9567 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9568
9569 std::vector<unsigned int> blockShape({2, 2});
9570 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9571
9572 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9573 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9574 crops, outputShape, expectedOutput);
9575}
9576
9577
9578LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
9579 armnn::IWorkloadFactory &workloadFactory,
9580 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9581{
9582 const unsigned int inputShape[] = {4, 3, 1, 1};
9583 const unsigned int outputShape[] = {1, 3, 2, 2};
9584
9585 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9586
9587 std::vector<uint8_t> expectedOutput({
9588 // Batch 0, Channel 0, Height (2) x Width (2)
9589 1, 4,
9590 7, 10,
9591
9592 // Batch 0, Channel 1, Height (2) x Width (2)
9593 2, 5,
9594 8, 11,
9595
9596 // Batch 0, Channel 2, Height (2) x Width (2)
9597 3, 6,
9598 9, 12,
9599 });
9600
9601 std::vector<unsigned int> blockShape({2, 2});
9602 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9603
9604 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9605 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9606 crops, outputShape, expectedOutput);
9607}
9608
9609LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
9610 armnn::IWorkloadFactory& workloadFactory,
9611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9612{
9613 const unsigned int inputShape[] = {4, 1, 1, 1};
9614 const unsigned int outputShape[] = {1, 1, 2, 2};
9615
9616 std::vector<uint8_t> input({
9617 // Batch 0, Height 0, Width (2) x Channel (1)
9618 1, 2, 3, 4
9619 });
9620
9621 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9622
9623 std::vector<unsigned int> blockShape({2, 2});
9624 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9625
9626 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9627 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9628 crops, outputShape, expectedOutput);
9629}
9630
9631LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9632 armnn::IWorkloadFactory& workloadFactory,
9633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9634{
9635 const unsigned int inputShape[] = {4, 3, 1, 1};
9636 const unsigned int outputShape[] = {1, 3, 2, 2};
9637
9638 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9639
9640 std::vector<uint8_t> expectedOutput({
9641 // Batch 0, Channel 0, Height (2) x Width (2)
9642 1, 7,
9643 2, 8,
9644
9645 // Batch 0, Channel 1, Height (2) x Width (2)
9646 3, 9,
9647 4, 10,
9648
9649 // Batch 0, Channel 2, Height (2) x Width (2)
9650 5, 11,
9651 6, 12,
9652 });
9653
9654 std::vector<unsigned int> blockShape({2, 2});
9655 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9656
9657 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9658 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9659 crops, outputShape, expectedOutput);
9660}
9661
9662LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
9663 armnn::IWorkloadFactory& workloadFactory,
9664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9665{
9666 const unsigned int inputShape[] = {8, 1, 1, 3};
9667 const unsigned int outputShape[] = {2, 1, 2, 4};
9668
9669 std::vector<uint8_t> input({
9670 0, 1, 3, 0, 9, 11,
9671 0, 2, 4, 0, 10, 12,
9672 0, 5, 7, 0, 13, 15,
9673 0, 6, 8, 0, 14, 16
9674 });
9675
9676 std::vector<uint8_t> expectedOutput({
9677 1, 2, 3, 4,
9678 5, 6, 7, 8,
9679 9, 10, 11, 12,
9680 13, 14, 15, 16
9681 });
9682
9683 std::vector<unsigned int> blockShape({2, 2});
9684 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9685
9686 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9687 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9688 crops, outputShape, expectedOutput);
9689}
9690
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009691LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9692 armnn::IWorkloadFactory& workloadFactory,
9693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9694{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009695 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009696}
9697
9698LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9699 armnn::IWorkloadFactory& workloadFactory,
9700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9701{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009702 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009703}
9704
9705LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9706 armnn::IWorkloadFactory& workloadFactory,
9707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9708{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009709 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009710}
9711
9712LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9713 armnn::IWorkloadFactory& workloadFactory,
9714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9715{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009716 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009717}
9718
9719LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9720 armnn::IWorkloadFactory& workloadFactory,
9721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9722{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009723 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009724}
9725
9726LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9727 armnn::IWorkloadFactory& workloadFactory,
9728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9729{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009730 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009731}
9732
9733LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9734 armnn::IWorkloadFactory& workloadFactory,
9735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9736{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009737 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009738}
9739
9740LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9741 armnn::IWorkloadFactory& workloadFactory,
9742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9743{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009744 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009745}
9746
9747LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9748 armnn::IWorkloadFactory& workloadFactory,
9749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9750{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009751 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009752}
9753
//
// StridedSlice test dispatchers, QAsymm8 (uint8) flavour.
// Same wrappers as the Float32 group above, instantiated for
// armnn::DataType::QuantisedAsymm8.
//

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Shrink-axis variant yields a rank-2 result (see LayerTestResult<uint8_t, 2>).
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009816
Matteo Martincigh42666a12019-05-29 08:53:41 +01009817LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9818 armnn::IWorkloadFactory& workloadFactory,
9819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9820{
9821 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9822}
9823
9824LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9825 armnn::IWorkloadFactory& workloadFactory,
9826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9827{
9828 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9829}
9830
9831LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9832 armnn::IWorkloadFactory& workloadFactory,
9833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9834{
9835 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9836}
9837
9838LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9839 armnn::IWorkloadFactory& workloadFactory,
9840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9841{
9842 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9843}
9844
9845LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9846 armnn::IWorkloadFactory& workloadFactory,
9847 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9848{
9849 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9850}
9851
9852LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9853 armnn::IWorkloadFactory& workloadFactory,
9854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9855{
9856 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9857}
9858
9859LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9860 armnn::IWorkloadFactory& workloadFactory,
9861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9862{
9863 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9864}
9865
9866LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9867 armnn::IWorkloadFactory& workloadFactory,
9868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9869{
9870 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9871}
9872
9873LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9874 armnn::IWorkloadFactory& workloadFactory,
9875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9876{
9877 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9878}
9879
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009880LayerTestResult<float, 4> Debug4DFloat32Test(
9881 armnn::IWorkloadFactory& workloadFactory,
9882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9883{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009884 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009885}
9886
9887LayerTestResult<float, 3> Debug3DFloat32Test(
9888 armnn::IWorkloadFactory& workloadFactory,
9889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9890{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009891 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009892}
9893
9894LayerTestResult<float, 2> Debug2DFloat32Test(
9895 armnn::IWorkloadFactory& workloadFactory,
9896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9897{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009898 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009899}
9900
9901LayerTestResult<float, 1> Debug1DFloat32Test(
9902 armnn::IWorkloadFactory& workloadFactory,
9903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009905 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009906}
9907
9908LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9909 armnn::IWorkloadFactory& workloadFactory,
9910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9911{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009912 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009913}
9914
9915LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9916 armnn::IWorkloadFactory& workloadFactory,
9917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9918{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009919 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009920}
9921
9922LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9923 armnn::IWorkloadFactory& workloadFactory,
9924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9925{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009926 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009927}
9928
9929LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9930 armnn::IWorkloadFactory& workloadFactory,
9931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9932{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009933 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009934}
Matteo Martincigh49124022019-01-11 13:25:59 +00009935
narpra014951d842019-01-18 16:53:53 +00009936LayerTestResult<float, 1> Gather1DParamsFloatTest(
9937 armnn::IWorkloadFactory& workloadFactory,
9938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9939{
9940 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9941}
9942
9943LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9944 armnn::IWorkloadFactory& workloadFactory,
9945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9946{
9947 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9948}
9949
9950LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9951 armnn::IWorkloadFactory& workloadFactory,
9952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9953{
9954 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9955}
9956
9957LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9958 armnn::IWorkloadFactory& workloadFactory,
9959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9960{
9961 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9962}
9963
9964LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9965 armnn::IWorkloadFactory& workloadFactory,
9966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9967{
9968 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9969}
9970
9971LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9972 armnn::IWorkloadFactory& workloadFactory,
9973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9974{
9975 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9976 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009977}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009978
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009979LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009980 armnn::IWorkloadFactory& workloadFactory,
9981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9982{
9983 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9984}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009985
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009986LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9987 armnn::IWorkloadFactory& workloadFactory,
9988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9989{
9990 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9991}
9992
9993LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9994 armnn::IWorkloadFactory& workloadFactory,
9995 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9996{
9997 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9998}
9999
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010000LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10001 armnn::IWorkloadFactory& workloadFactory,
10002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10003{
10004 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10005}
10006
10007LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10008 armnn::IWorkloadFactory& workloadFactory,
10009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10010{
10011 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10012}
10013
10014LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10015 armnn::IWorkloadFactory& workloadFactory,
10016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10017{
10018 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10019}