blob: 46063803f051a5323dc6c5b66d135cccfad7c997 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Laid out channel-major (NCHW interior): 8 rows of 16 values per channel.
static std::vector<float> ConvInput3x8x16({
    // Channel 0: all 0.5f except the second row, which is all zero.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: a single vertical line of 1s in the third column, zeros elsewhere.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: all -1.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});
79
// Shared fixture data for the simple 3D softmax tests: an 8-element input
// (shape {1, 8, 1}) together with its expected softmax output.
struct Simple3dSoftmaxOutputData
{
    // Expected softmax of inputData (values hand-computed).
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    // 3D input tensor shape: 1 batch, 8 elements, 1 channel.
    const armnn::TensorShape inputShape{ 1, 8, 1 };

    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f,
    };
};
96
// Shared fixture data for the simple 4D softmax tests: same 8 values as the
// 3D variant, but carried in a 4D shape {1, 8, 1, 1}.
struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    // Expected softmax of inputData (values hand-computed).
    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f
    };
};
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000111template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +0100112boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +0000113{
114 if(biasEnabled)
115 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +0100117 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +0000118 return bias;
119 }
120 else
121 {
122 return boost::multi_array<T, 1>();
123 }
124}
125
// Runs a 2D convolution over the shared 3-channel 16x8 input with a 2-element
// batch of 3-channel 3x5 kernels and checks the result against a
// hand-computed expected output. qScale/qOffset quantize all tensors;
// biasEnabled selects the shared Bias2 bias; layout picks NCHW/NHWC.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    // Values are listed kernel-major: 3 channels of 5x3 for kernel 0, then 3 channels for kernel 1.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output tensor has shape {1, 2, 4, 14}: one batch with one
    // 4x14 plane per kernel (the declared shape is the ground truth here).
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias is quantized with scale qScale*qScale (input scale x weight scale).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
209
// Runs a 2D convolution over the shared 3-channel 16x8 input with a 2-element
// batch of 3-channel 3x3 kernels and checks against a hand-computed expected
// output. qScale/qOffset quantize all tensors; biasEnabled selects the shared
// Bias2 bias; layout picks NCHW/NHWC.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    // Values are listed kernel-major: 3 channels of 3x3 for kernel 0, then 3 channels for kernel 1.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output tensor has shape {1, 2, 6, 14}: one batch with one
    // 6x14 plane per kernel.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias is quantized with scale qScale*qScale (input scale x weight scale).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
286
// Runs a 2D convolution in the requested (NHWC) data layout over a small
// single-channel image, checking against a hand-computed expected output.
// NOTE(review): biasEnabled is accepted but ignored — an empty bias tensor is
// always passed to the impl below.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch, single-channel 3x4 image in NHWC layout (shape {1, 3, 4, 1}).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });


    // Single 3x3 single-channel kernel (shape {1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                       4, 5, 6,
                                                                       0, 0, 0,
                                                                       3, 2, 1
                                                                   });

    // Expected output is a single-batch, single-channel 3x4 image (shape {1, 3, 4, 1}).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),  // No bias (see NOTE above).
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
338
// Runs a 2D convolution with 2x2 stride and 1-pixel padding on every side over
// a 5x5 single-channel image, checking against a hand-computed expected output.
// NOTE(review): biasEnabled is accepted but ignored — an empty bias tensor is
// always passed to the impl below.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // 1-pixel padding on all sides, stride 2 in both dimensions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),  // No bias (see NOTE above).
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
412 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000413}
414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000415LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
416 armnn::IWorkloadFactory& workloadFactory,
417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000419 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000421 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
422 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000423}
424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000425LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
426 armnn::IWorkloadFactory& workloadFactory,
427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000431 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
432 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
441 workloadFactory,
442 memoryManager,
443 0.f,
444 0,
445 biasEnabled,
446 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100447}
448
Mike Kelly7332ed82018-12-20 17:03:06 +0000449LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
450 armnn::IWorkloadFactory& workloadFactory,
451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
452 bool biasEnabled,
453 const armnn::DataLayout layout)
454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000455 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
456 workloadFactory,
457 memoryManager,
458 0.f,
459 0,
460 biasEnabled,
461 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000462}
463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000464LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
465 armnn::IWorkloadFactory& workloadFactory,
466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
467 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000468 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000469{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000470 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
471 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000472}
473
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100474LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
475 armnn::IWorkloadFactory& workloadFactory,
476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
477 bool biasEnabled,
478 const armnn::DataLayout layout)
479{
480return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
481 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
482}
483
484LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
485 armnn::IWorkloadFactory& workloadFactory,
486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
487 bool biasEnabled,
488 const armnn::DataLayout layout)
489{
490 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
491 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
492}
493
// Exercises asymmetric padding where the padding is larger than half the
// kernel size (pad 1/2/3/4 around a 2x2 kernel on a 3x3 image), so large
// regions of the output come purely from padding. No bias is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel image with shape {1, 1, 8, 6}.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[.....  .....  ..... ..... ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),  // Bias disabled for this test.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
559 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000560LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
561 armnn::IWorkloadFactory& workloadFactory,
562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000563 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000564 float qScale,
565 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000566{
telsoa01c577f2c2018-08-31 09:22:23 +0100567 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000568 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000569 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
570 QuantizedVector<T>(qScale, qOffset, {
571 11,21,31,41,51,
572 12,22,32,42,52,
573 13,23,33,43,53,
574 14,24,34,44,54,
575 15,25,35,45,55,
576 })));
577
telsoa01c577f2c2018-08-31 09:22:23 +0100578 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000579 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000580 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
581 QuantizedVector<T>(qScale, qOffset, {
582 -11,-21,-31,-41,
583 -12,-22,-32,-42,
584 -13,-23,-33,-43,
585 -14,-24,-34,-44,
586 })));
587
telsoa01c577f2c2018-08-31 09:22:23 +0100588 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000589 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000590 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
591 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
592 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000593 -7140, -10580, -13940, -9300, -5230,
594 -9590, -14120, -18520, -12290, -6860,
595 -9980, -14560, -18960, -12560, -7000,
596 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100597 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000598 })));
599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000600 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
601 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000602 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000603 input,
604 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100605 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000606 expectedOutput,
607 qScale,
608 qOffset,
narpra015f703182018-10-26 16:24:58 +0100609 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100610 1, // Padding left.
611 1, // Padding top.
612 2, // Padding right.
613 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100614}
615
Teresa Charlinedeeb162019-06-14 11:09:19 +0100616LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
617 armnn::IWorkloadFactory& workloadFactory,
618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
619 armnn::DataLayout layout)
620{
621 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
622 workloadFactory, memoryManager, layout, 0.0f, 0);
623}
624
625LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
626 armnn::IWorkloadFactory& workloadFactory,
627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
628 armnn::DataLayout layout)
629{
630 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
631 <armnn::DataType::Float32, armnn::DataType::Float32>(
632 workloadFactory, memoryManager, layout, 0.0f, 0);
633}
634
635LayerTestResult<float, 4> Convolution1dTest(
636 armnn::IWorkloadFactory& workloadFactory,
637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
638 bool biasEnabled)
639{
640 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
641 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
642}
643
644LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
645 armnn::IWorkloadFactory& workloadFactory,
646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
647 bool biasEnabled)
648{
649 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
650 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
651}
652
653LayerTestResult<float,4> CompareConvolution2dTest(
654 armnn::IWorkloadFactory& workloadFactory,
655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
656 armnn::IWorkloadFactory& refWorkloadFactory)
657{
658 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
659 workloadFactory, memoryManager, refWorkloadFactory);
660}
661
// Generic driver for dilated-convolution tests: quantizes the caller-supplied
// input/kernel/expected-output values according to ArmnnType, stamps the
// quantization parameters onto the (mutable, by-reference) tensor infos, and
// delegates to SimpleConvolution2dTestImpl with the given dilation, padding
// and stride settings.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    // Select quantization parameters per data type; Float32 (and anything
    // unrecognised) gets scale 0 / offset 0, i.e. no quantization.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Propagate the chosen quantization onto the caller's tensor infos
    // (mutates them — callers pass these by non-const reference).
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias is quantized with scale qScale*qScale (input scale x weight scale).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
748
// Single-channel 3x3 convolution with 3x3 dilation over a 10x10 input,
// checked against pre-computed reference output values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 1 channel, 10x10 input containing a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
804
// Two-input-channel 3x3 convolution with 3x3 dilation; the kernel has a single
// output channel, so both input channels are accumulated into one 4x4 output.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 2 channels, 10x10 input; both channels hold the same 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel over two input channels (same 3x3 weights for both).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    // The values are double those of the single-channel test because the two
    // channel contributions are summed.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
875
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100876template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
877LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
878 armnn::IWorkloadFactory &workloadFactory,
879 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
880 bool biasEnabled,
881 const armnn::DataLayout layout)
882{
883 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
884 std::vector<float> inputNoQuantizedValues =
885 {
886 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
887 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
888 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
889 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
890 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
891 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
892 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
893 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
894 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
895 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
896 };
897
898 armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
899 std::vector<float> kernelNoQuantizedValues =
900 {
901 1, 2,
902 3, 4
903 };
904
905 // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
906 // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
Jan Eilers0bf6b232019-07-12 10:46:33 +0100907 // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100908 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
909 std::vector<float> outputExpectedNoQuantizedValues =
910 {
911 4, 7, 7, 3,
912 6, 10, 10, 4,
913 6, 10, 10, 4,
914 2, 3, 3, 1
915 };
916 uint32_t padLeft = 1;
917 uint32_t padTop = 1;
918 uint32_t padRight = 1;
919 uint32_t padBottom = 1;
920
921 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
922 workloadFactory,
923 memoryManager,
924 inputNoQuantizedValues,
925 inputTensorInfo,
926 kernelNoQuantizedValues,
927 kernelTensorInfo,
928 outputExpectedNoQuantizedValues,
929 outputTensorInfo,
930 2,
931 2,
932 layout,
933 padLeft,
934 padTop,
935 padRight,
936 padBottom,
937 3,
938 3,
939 biasEnabled
940 );
941}
942
Teresa Charlinedeeb162019-06-14 11:09:19 +0100943template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
944Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
945 armnn::IWorkloadFactory&,
946 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
947 bool,
948 armnn::DataLayout);
949
950template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
951Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
952 armnn::IWorkloadFactory&,
953 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
954 bool,
955 armnn::DataLayout);
956
957template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
958Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
959 armnn::IWorkloadFactory&,
960 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
961 bool,
962 armnn::DataLayout);
963
964template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
965Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
966 armnn::IWorkloadFactory&,
967 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
968 bool,
969 armnn::DataLayout);
970
971template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
972Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
973 armnn::IWorkloadFactory&,
974 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
975 bool,
976 armnn::DataLayout);
977
978template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
979Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
980 armnn::IWorkloadFactory&,
981 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
982 bool,
983 armnn::DataLayout);
984
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100985template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
986Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
987 armnn::IWorkloadFactory &workloadFactory,
988 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
989 bool biasEnabled,
990 const armnn::DataLayout layout);
991
992template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
993Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
994 armnn::IWorkloadFactory &workloadFactory,
995 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
996 bool biasEnabled,
997 const armnn::DataLayout layout);
998
999template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
1000Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1001 armnn::IWorkloadFactory &workloadFactory,
1002 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
1003 bool biasEnabled,
1004 const armnn::DataLayout layout);
1005
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001006template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1007 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001008LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
1009 armnn::IWorkloadFactory& workloadFactory,
1010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1011 float qScale,
1012 int32_t qOffset,
1013 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001014 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001015{
telsoa01c577f2c2018-08-31 09:22:23 +01001016 // Use a single-batch 2-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001017 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +01001018 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001019 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1020 {
surmeh013537c2c2018-05-18 16:31:43 +01001021 0, 1, 2, 3, 4,
1022 5, 6, 7, 8, 9,
1023 10, 11, 12, 13, 14,
1024 15, 16, 17, 18, 19,
1025 20, 21, 22, 23, 24,
1026
1027 25, 26, 27, 28, 29,
1028 30, 31, 32, 33, 34,
1029 35, 36, 37, 38, 39,
1030 40, 41, 42, 43, 44,
1031 45, 46, 47, 48, 49
1032 })));
1033
telsoa01c577f2c2018-08-31 09:22:23 +01001034 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001035 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +01001036 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001037 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1038 {
surmeh013537c2c2018-05-18 16:31:43 +01001039 32, 31, 30, 29,
1040 28, 27, 26, 25,
1041 24, 23, 22, 21,
1042 20, 19, 18, 17,
1043
1044 16, 15, 14, 13,
1045 12, 11, 10, 9,
1046 8, 7, 6, 5,
1047 4, 3, 2, 1
1048 })));
1049
telsoa01c577f2c2018-08-31 09:22:23 +01001050 // Expected output is 1 batch of a 2-channel 5x5 image.
1051 // Calculated using the python tensorflow library with strideX=1, strideY=1.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001052 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +01001053 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001054 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1055 {
surmeh013537c2c2018-05-18 16:31:43 +01001056 1062, 1580, 1850, 1530, 1117,
1057 2140, 3108, 3500, 2842, 2042,
1058 3580, 5068, 5460, 4342, 3062,
1059 3618, 5072, 5390, 4248, 2971,
1060 3074, 4282, 4510, 3533, 2457,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001061
surmeh013537c2c2018-05-18 16:31:43 +01001062 1550, 2284, 2362, 1955, 1428,
1063 2910, 4206, 4342, 3528, 2536,
1064 3390, 4886, 5022, 4068, 2916,
1065 3566, 5056, 5182, 4133, 2922,
1066 3100, 4352, 4452, 3517, 2465
1067 })));
1068
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001069 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
1070 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001071 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01001072 input,
1073 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001074 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
surmeh013537c2c2018-05-18 16:31:43 +01001075 expectedOutput,
1076 qScale,
1077 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +01001078 layout,
telsoa01c577f2c2018-08-31 09:22:23 +01001079 1, // Padding left.
1080 1, // Padding top.
1081 2, // Padding right.
1082 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +01001083 1, // strideX
1084 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +00001085}
1086
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001087template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1088 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001089LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
1090 armnn::IWorkloadFactory& workloadFactory,
1091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1092 float qScale,
1093 int32_t qOffset,
1094 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001095{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001096 auto layout = armnn::DataLayout::NHWC;
1097
1098 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001099 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001100 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1101 {
1102 0, 1, 2, 3, 4,
1103 5, 6, 7, 8, 9,
1104 10, 11, 12, 13, 14,
1105 15, 16, 17, 18, 19,
1106 20, 21, 22, 23, 24,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001107
Teresa Charlin20b1f882019-06-19 09:34:37 +01001108 25, 26, 27, 28, 29,
1109 30, 31, 32, 33, 34,
1110 35, 36, 37, 38, 39,
1111 40, 41, 42, 43, 44,
1112 45, 46, 47, 48, 49
Nikhil Rajcec6b652018-10-12 13:51:57 +01001113 })));
1114
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001115 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001116 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001117 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1118 {
Matteo Martincigh747ef822018-12-18 09:26:39 +00001119 32, 31, 30, 29,
1120 28, 27, 26, 25,
1121 24, 23, 22, 21,
1122 20, 19, 18, 17,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001123
Matteo Martincigh747ef822018-12-18 09:26:39 +00001124 16, 15, 14, 13,
1125 12, 11, 10, 9,
1126 8, 7, 6, 5,
1127 4, 3, 2, 1
Nikhil Rajcec6b652018-10-12 13:51:57 +01001128 })));
1129
Teresa Charlin20b1f882019-06-19 09:34:37 +01001130 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001131 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001132 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1133 {
1134 1062, 1580, 1850, 1530, 1117,
1135 2140, 3108, 3500, 2842, 2042,
1136 3580, 5068, 5460, 4342, 3062,
1137 3618, 5072, 5390, 4248, 2971,
1138 3074, 4282, 4510, 3533, 2457,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001139
Teresa Charlin20b1f882019-06-19 09:34:37 +01001140 1550, 2284, 2362, 1955, 1428,
1141 2910, 4206, 4342, 3528, 2536,
1142 3390, 4886, 5022, 4068, 2916,
1143 3566, 5056, 5182, 4133, 2922,
1144 3100, 4352, 4452, 3517, 2465
Nikhil Rajcec6b652018-10-12 13:51:57 +01001145 })));
1146
Teresa Charlin20b1f882019-06-19 09:34:37 +01001147 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001148 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001149 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001150 input,
1151 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001152 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nikhil Rajcec6b652018-10-12 13:51:57 +01001153 expectedOutput,
1154 qScale,
1155 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001156 layout,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001157 1, // Padding left.
1158 1, // Padding top.
1159 2, // Padding right.
1160 2, // Padding bottom.
1161 1, // strideX
1162 1); // strideY
1163}
1164
Bruno Goncalves22972f02019-04-26 21:03:24 -03001165template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1166 typename T = armnn::ResolveType<ArmnnType>>
1167LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1168 armnn::IWorkloadFactory& workloadFactory,
1169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1170 float qScale,
1171 int32_t qOffset,
1172 bool biasEnabled)
1173{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001174 auto layout = armnn::DataLayout::NHWC;
1175
1176 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001177 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001178 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1179 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001180 0, 0, 0, 0, 0, 0, 0, 0, 0,
1181 0, 0, 0, 0, 0, 0, 0, 0, 0,
1182 0, 0, 0, 0, 0, 0, 0, 0, 0,
1183 0, 0, 0, 1, 1, 1, 0, 0, 0,
1184 0, 0, 0, 1, 1, 1, 0, 0, 0,
1185 0, 0, 0, 1, 1, 1, 0, 0, 0,
1186 0, 0, 0, 0, 0, 0, 0, 0, 0,
1187 0, 0, 0, 0, 0, 0, 0, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0, 0
1189 })));
1190
1191 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1192 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001193 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1194 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001195 1, 2, 3,
1196 4, 5, 6,
1197 7, 8, 9
1198 })));
1199
1200 uint32_t padLeft = 0;
1201 uint32_t padTop = 0;
1202 uint32_t padRight = 0;
1203 uint32_t padBottom = 0;
1204 uint32_t strideX = 1;
1205 uint32_t strideY = 1;
1206 uint32_t dilationX = 3;
1207 uint32_t dilationY = 3;
1208
1209 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Teresa Charlin20b1f882019-06-19 09:34:37 +01001210 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001211 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001212 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1213 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001214 5, 5, 5,
1215 5, 5, 5,
1216 5, 5, 5
1217 })));
1218
Teresa Charlin20b1f882019-06-19 09:34:37 +01001219 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Bruno Goncalves22972f02019-04-26 21:03:24 -03001220 workloadFactory,
1221 memoryManager,
1222 input,
1223 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001224 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001225 expectedOutput,
1226 qScale,
1227 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001228 layout,
Bruno Goncalves22972f02019-04-26 21:03:24 -03001229 padLeft,
1230 padTop,
1231 padRight,
1232 padBottom,
1233 strideX,
1234 strideY,
1235 dilationX,
1236 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001237}
1238
Teresa Charlin20b1f882019-06-19 09:34:37 +01001239
1240template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1241LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1242 armnn::IWorkloadFactory& workloadFactory,
1243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1244 const std::vector<float>& inputNoQuantizedValues,
1245 armnn::TensorInfo& inputTensorInfo,
1246 const std::vector<float>& kernelNoQuantizedValues,
1247 armnn::TensorInfo& kernelTensorInfo,
1248 const std::vector<float>& outputExpectedNoQuantizedValues,
1249 armnn::TensorInfo& outputTensorInfo,
1250 uint32_t dilationX,
1251 uint32_t dilationY,
1252 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1253 bool biasEnabled = false)
1254{
1255 float qScale;
1256 int32_t qOffset;
1257 switch (ArmnnType)
1258 {
1259 case armnn::DataType::QuantisedAsymm8:
1260 {
1261 qScale = 0.1f;
1262 qOffset = 128;
1263 break;
1264 }
1265 case armnn::DataType::QuantisedSymm16:
1266 {
1267 qScale = 0.1f;
1268 qOffset = 0;
1269 break;
1270 }
1271 case armnn::DataType::Float32:
1272 default:
1273 {
1274 qScale = 0.f;
1275 qOffset = 0;
1276 break;
1277 }
1278 }
1279
1280 inputTensorInfo.SetQuantizationScale(qScale);
1281 inputTensorInfo.SetQuantizationOffset(qOffset);
1282 kernelTensorInfo.SetQuantizationScale(qScale);
1283 kernelTensorInfo.SetQuantizationOffset(qOffset);
1284 outputTensorInfo.SetQuantizationScale(qScale);
1285 outputTensorInfo.SetQuantizationOffset(qOffset);
1286
1287 auto input = MakeTensor<T, 4>(inputTensorInfo,
1288 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1289 inputTensorInfo.GetQuantizationOffset(),
1290 inputNoQuantizedValues)));
1291 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1292 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1293 kernelTensorInfo.GetQuantizationOffset(),
1294 kernelNoQuantizedValues)));
1295 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1296 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1297 outputTensorInfo.GetQuantizationOffset(),
1298 outputExpectedNoQuantizedValues)));
1299
1300 uint32_t padLeft = 0;
1301 uint32_t padTop = 0;
1302 uint32_t padRight = 0;
1303 uint32_t padBottom = 0;
1304 uint32_t strideX = 1;
1305 uint32_t strideY = 1;
1306
1307 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1308 workloadFactory,
1309 memoryManager,
1310 input,
1311 kernel,
1312 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1313 expectedOutput,
1314 qScale,
1315 qOffset,
1316 layout,
1317 padLeft,
1318 padTop,
1319 padRight,
1320 padBottom,
1321 strideX,
1322 strideY,
1323 dilationX,
1324 dilationY);
1325}
1326
// Single-channel depthwise 3x3 convolution with 3x3 dilation over a 10x10
// input, checked against pre-computed reference values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 1 channel, 10x10 input containing a 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1382
// Two-channel depthwise 3x3 convolution with 3x3 dilation; unlike the
// non-depthwise variant, each channel is convolved independently so the
// output keeps both channels (1x2x4x4).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 1 batch, 2 channels, 10x10 input; both channels hold the same 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 filter per input channel (same weights for both).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1458
1459
// Explicit template instantiations of the dilated DepthwiseConvolution2d tests
// for the data types exercised by the backend unit test suites.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1501
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001502LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1503 armnn::IWorkloadFactory& workloadFactory,
1504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1505 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001506 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001507{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001508 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001509 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001510}
1511
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001512LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1513 armnn::IWorkloadFactory& workloadFactory,
1514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1515 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001517 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1518 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001519}
1520
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001521LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1522 armnn::IWorkloadFactory& workloadFactory,
1523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1524 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001525 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001526{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001527 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001528 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001529}
1530
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001531LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534{
1535 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538 std::vector<float> kernelData;
1539 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540 for (unsigned int i = 0; i < 64; ++i)
1541 {
1542 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543 }
1544 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
1547 std::vector<float> expectedOutputData(64, 0.f);
1548 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552 workloadFactory,
1553 memoryManager,
1554 input,
1555 kernel,
1556 boost::multi_array<float, 1>(),
1557 expectedOutput,
1558 0.f,
1559 0,
1560 armnn::DataLayout::NCHW);
1561}
1562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001563LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001567 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001569 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001570 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001571}
1572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001573LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574 armnn::IWorkloadFactory& workloadFactory,
1575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001577 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001578{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001579 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001580 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001581}
1582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001583LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584 armnn::IWorkloadFactory& workloadFactory,
1585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001587 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001589 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001590 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001591}
1592
Bruno Goncalves22972f02019-04-26 21:03:24 -03001593LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594 armnn::IWorkloadFactory& workloadFactory,
1595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596{
1597 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001598 workloadFactory,
1599 memoryManager,
1600 0.f,
1601 0,
1602 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001603}
1604
Ruomei Yan88d44b82019-05-23 14:29:06 +01001605LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 bool biasEnabled,
1609 const armnn::DataLayout layout)
1610{
1611 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613}
1614
1615LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618 bool biasEnabled,
1619 const armnn::DataLayout layout)
1620{
1621 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623}
1624
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001625LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001629 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001631 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001633}
1634
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001635LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636 armnn::IWorkloadFactory& workloadFactory,
1637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638 armnn::IWorkloadFactory& refWorkloadFactory,
1639 const armnn::DataLayout layout)
1640{
1641 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642 workloadFactory, memoryManager, refWorkloadFactory, layout);
1643}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001644
1645LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646 armnn::IWorkloadFactory& workloadFactory,
1647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001648{
1649 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001651 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
1658 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001660 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001666{
1667 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001669 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001670}
1671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001672LayerTestResult<float,2> SimpleSoftmaxTest(
1673 armnn::IWorkloadFactory& workloadFactory,
1674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001677 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001678}
1679
Francis Murtagh07f21212019-07-23 09:50:50 +01001680LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681 armnn::IWorkloadFactory& workloadFactory,
1682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683 float beta,
1684 int axis)
1685{
1686 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687}
1688
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001689LayerTestResult<float,3> Simple3dSoftmaxTest(
1690 armnn::IWorkloadFactory& workloadFactory,
1691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692 float beta)
1693{
Francis Murtagh07f21212019-07-23 09:50:50 +01001694 Simple3dSoftmaxOutputData data;
1695 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696 data.inputShape, data.outputData, data.inputData);
1697}
1698
1699LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
1700 armnn::IWorkloadFactory& workloadFactory,
1701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1702 float beta,
1703 int axis)
1704{
1705 armnn::TensorShape inputShape;
1706 std::vector<float> inputData;
1707 std::vector<float> outputData;
1708 switch (axis)
1709 {
1710 case -3:
1711 case 0:
1712 {
1713 inputShape = {5, 2, 2};
1714
1715 inputData =
1716 {
1717 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1718
1719 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1720 };
1721
1722 outputData =
1723 {
1724 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1725 0.236882800924671f,
1726 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1727 0.087144312427294f,
1728
1729 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1730 0.032058600957022f,
1731 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1732 7.246299848982885e-08f
1733 };
1734 break;
1735 }
1736 case -2:
1737 case 1:
1738 {
1739 inputShape = {2, 5, 2};
1740
1741 inputData =
1742 {
1743 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1744
1745 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1746 };
1747
1748 outputData =
1749 {
1750 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1751 0.087144312427294f,
1752 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1753 7.246299848982885e-08f,
1754
1755 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1756 0.087144312427294f,
1757 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1758 7.246299848982885e-08f
1759 };
1760 break;
1761 }
1762 case -1:
1763 case 2:
1764 {
1765 inputShape = {2, 2, 5};
1766
1767 inputData =
1768 {
1769 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1770 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1771 };
1772
1773 outputData =
1774 {
1775 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1776 7.246299848982885e-08f,
1777 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1778 7.246299848982885e-08f,
1779
1780 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1781 7.246299848982885e-08f,
1782 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1783 7.246299848982885e-08f
1784 };
1785 break;
1786 }
1787 }
1788
1789 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1790 inputShape, outputData, inputData, axis);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001791}
1792
1793LayerTestResult<float,4> Simple4dSoftmaxTest(
1794 armnn::IWorkloadFactory& workloadFactory,
1795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796 float beta)
1797{
Francis Murtagh07f21212019-07-23 09:50:50 +01001798 Simple4dSoftmaxData data;
1799 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800 data.outputData, data.inputData);
1801}
1802
1803LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
1804 armnn::IWorkloadFactory& workloadFactory,
1805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1806 float beta,
1807 int axis)
1808{
1809 armnn::TensorShape inputShape;
1810 std::vector<float> inputData;
1811 std::vector<float> outputData;
1812 switch (axis)
1813 {
1814 case -4:
1815 case 0:
1816 {
1817 inputShape = {5, 2, 2, 2};
1818
1819 inputData =
1820 {
1821 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
1822 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
1823 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
1824 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
1825 };
1826
1827 outputData =
1828 {
1829 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1830 0.643914213228014f,
1831 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
1832 0.236882800924671f,
1833 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
1834 0.236882800924671f,
1835 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1836 0.087144312427294f,
1837
1838 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1839 0.032058600957022f,
1840 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
1841 0.032058600957022f,
1842 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1843 7.246299848982885e-08f,
1844 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1845 7.246299848982885e-08f, 7.246299848982885e-08f
1846 };
1847 break;
1848 }
1849 case -3:
1850 case 1:
1851 {
1852 inputShape = {2, 5, 2, 2};
1853
1854 inputData =
1855 {
1856 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1857 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
1858 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1859 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1860 };
1861
1862 outputData =
1863 {
1864 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1865 0.236882800924671f,
1866 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1867 0.087144312427294f,
1868 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1869 0.032058600957022f,
1870 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1871 7.246299848982885e-08f,
1872
1873
1874 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1875 0.236882800924671f,
1876 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1877 0.087144312427294f,
1878 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1879 0.032058600957022f,
1880 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1881 7.246299848982885e-08f
1882 };
1883 break;
1884 }
1885 case -2:
1886 case 2:
1887 {
1888 inputShape = {2, 2, 5, 2};
1889
1890 inputData =
1891 {
1892 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1893 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1894 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1895 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1896 };
1897
1898 outputData =
1899 {
1900 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1901 0.087144312427294f,
1902 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1903 7.246299848982885e-08f,
1904 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1905 0.087144312427294f,
1906 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1907 7.246299848982885e-08f,
1908
1909 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1910 0.087144312427294f,
1911 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1912 7.246299848982885e-08f,
1913 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1914 0.087144312427294f,
1915 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1916 7.246299848982885e-08f
1917 };
1918 break;
1919 }
1920 case -1:
1921 case 3:
1922 {
1923 inputShape = {2, 2, 2, 5};
1924
1925 inputData =
1926 {
1927 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1928 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1929 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1930 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1931 };
1932
1933 outputData =
1934 {
1935 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1936 7.246299848982885e-08f,
1937 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1938 7.246299848982885e-08f,
1939 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1940 7.246299848982885e-08f,
1941 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1942 7.246299848982885e-08f,
1943
1944 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1945 7.246299848982885e-08f,
1946 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1947 7.246299848982885e-08f,
1948 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1949 7.246299848982885e-08f,
1950 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1951 7.246299848982885e-08f
1952 };
1953 break;
1954 }
1955 }
1956
1957 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
1958 outputData, inputData, axis);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001959}
1960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001961LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962 armnn::IWorkloadFactory& workloadFactory,
1963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001966 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001967}
1968
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001969LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970 armnn::IWorkloadFactory& workloadFactory,
1971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972 float beta)
1973{
Francis Murtagh07f21212019-07-23 09:50:50 +01001974 Simple3dSoftmaxOutputData data;
1975 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001977}
1978
1979LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982 float beta)
1983{
Francis Murtagh07f21212019-07-23 09:50:50 +01001984 Simple4dSoftmaxData data;
1985
1986 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001988}
1989
nikraj01248683f2019-05-29 16:46:50 +01001990LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991 armnn::IWorkloadFactory& workloadFactory,
1992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993 float beta)
1994{
1995 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996}
1997
1998LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999 armnn::IWorkloadFactory& workloadFactory,
2000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001 float beta)
2002{
Francis Murtagh07f21212019-07-23 09:50:50 +01002003 Simple3dSoftmaxOutputData data;
2004 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002006}
2007
2008LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009 armnn::IWorkloadFactory& workloadFactory,
2010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011 float beta)
2012{
Francis Murtagh07f21212019-07-23 09:50:50 +01002013 Simple4dSoftmaxData data;
2014
2015 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002017}
2018
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002019LayerTestResult<float,4> CompareNormalizationTest(
2020 armnn::IWorkloadFactory& workloadFactory,
2021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022 armnn::IWorkloadFactory& refWorkloadFactory,
2023 armnn::NormalizationAlgorithmChannel normChannel,
2024 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00002025{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002026 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00002027}
2028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002029LayerTestResult<float,2> CompareSoftmaxTest(
2030 armnn::IWorkloadFactory& workloadFactory,
2031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002032 armnn::IWorkloadFactory& refWorkloadFactory,
2033 float beta)
2034{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002035 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002037}
2038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 armnn::IWorkloadFactory& refWorkloadFactory,
2043 float beta)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002047}
2048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002049std::vector<LayerTestResult<float,3>> SplitterTest(
2050 armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002052{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002053 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00002054}
2055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002056std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057 armnn::IWorkloadFactory& workloadFactory,
2058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002060 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002061}
2062
Ruomei Yan25339c32019-05-28 16:48:20 +01002063std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064 armnn::IWorkloadFactory& workloadFactory,
2065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066{
2067 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068}
2069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002070LayerTestResult<float, 3> CopyViaSplitterTest(
2071 armnn::IWorkloadFactory& workloadFactory,
2072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002074 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002075}
2076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002077LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078 armnn::IWorkloadFactory& workloadFactory,
2079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002081 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002082}
2083
Ruomei Yan25339c32019-05-28 16:48:20 +01002084LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085 armnn::IWorkloadFactory& workloadFactory,
2086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087{
2088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089}
2090
Jan Eilers38e05bd2019-06-26 13:10:09 +01002091void LstmUtilsZeroVectorTest()
2092{
2093 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095 {2., 3., 3., 4.}));
2096
2097 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098 {0., 0., 0., 0.}));
2099
2100 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101}
2102
2103void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104{
2105 uint32_t batchSize = 2;
2106 uint32_t vecSize = 4;
2107 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
2110 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
2111
2112 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2113 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
2114 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
2115
2116 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117 vecSize, batchSize, expectedOutput);
2118}
2119
2120void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121{
2122 uint32_t batchSize = 2;
2123 uint32_t vecSize = 4;
2124 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2127 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2128
2129 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2131 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2132
2133 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134 vecSize, batchSize, expectedOutput);
2135}
2136
2137void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138{
2139 uint32_t batchSize = 2;
2140 uint32_t vecSize = 4;
2141 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2144 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
2145
2146 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2148 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
2149
2150 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151 vecSize, batchSize, expectedOutput);
2152}
2153
2154
// Verifies the LstmUtils vector/batch-vector element-wise product: each of the
// four batches is multiplied element-wise by the same 29-element vector.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        { 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
         11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
         21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    // Batches 0/1 are +/- the vector itself; batches 2/3 alternate the sign
    // element by element (with opposite phase), exercising both signs everywhere.
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
         11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
         21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,  0.0f,
          /* batch 1 */
         -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f,  -10.1f,
         -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
         -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,  0.0f,
          /* batch 2 */
          1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f,   -10.1f,
         11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
         21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,  0.0f,
          /* batch 3 */
         -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f,   10.1f,
         -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
         -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,  0.0f}));

    // Expected output is the element-wise product vector * batchVector
    // (e.g. batch 0, element 0: 1.1 * 1.1 == 1.21).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
         59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
        172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
        368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
        637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
          /* batch 1 */
         -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
        -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
       -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
       -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
       -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
          /* batch 2 */
          1.210000f,   -4.840000f,   10.889999f,  -19.360001f,   30.250000f,  -43.559998f,
         59.289997f,  -77.440002f,   98.009995f, -102.010010f,  123.432091f, -146.894394f,
        172.396896f, -199.939606f,  229.522491f, -261.145599f,  294.808899f, -330.512421f,
        368.256134f, -408.040039f,  449.864075f, -493.728363f,  539.632874f, -587.577576f,
        637.562500f, -689.587585f,  743.652954f, -799.758423f,    0.000000f,
          /* batch 3 */
         -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
        -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
       -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
       -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
       -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
2214
2215
2216void LstmUtilsVectorBatchVectorAddTest()
2217{
2218 uint32_t batchSize = 2;
2219 uint32_t vecSize = 3;
2220 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222 { 0.0f, -0.5f, 1.0f}));
2223
2224 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226 { 1.0f, 2.0f, 3.0f, //batch 0
2227 4.0f, 5.0f, 6.0f})); //batch 1
2228
2229 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230 { 1.0f, 1.5f, 4.0f,
2231 4.0f, 4.5f, 7.0f}));
2232
2233 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234 vecSize, batchSize, expectedOutput);
2235}
2236
2237
telsoa01c577f2c2018-08-31 09:22:23 +01002238LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002239 armnn::IWorkloadFactory& workloadFactory,
2240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002241{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002242 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002243 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2244 { 2., 3., 3., 4. }));
2245
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002246 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002247 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2248 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2249 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002250 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002251 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002252}
2253
2254LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01002255 armnn::IWorkloadFactory& workloadFactory,
2256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002257{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002258 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002259 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2260 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2261 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2262
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002263 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002264 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2265 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2266 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2267 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2268 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2269 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2270 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
2271 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002272 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2273 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002274}
2275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002276LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2277 armnn::IWorkloadFactory& workloadFactory,
2278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002279{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002280 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002281 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2282 {2., 3., 3., 4.}));
2283
2284
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002285 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002286 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2287 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2288 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
2289
Conor Kennedyb9971c92019-05-07 07:14:23 +01002290 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002291 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002292}
2293
Jan Eilers38e05bd2019-06-26 13:10:09 +01002294
2295LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2296 armnn::IWorkloadFactory& workloadFactory,
2297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298{
2299 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2300 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2301 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
2302 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
2303
2304 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2305 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2306 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
2307 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
2308 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2309 workloadFactory, memoryManager, input, expectedOutput);
2310}
2311
2312
Conor Kennedyb9971c92019-05-07 07:14:23 +01002313LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2314 armnn::IWorkloadFactory& workloadFactory,
2315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2316{
2317 const float qScale = 1.0f;
2318 const int32_t qOffset = 0;
2319
2320 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2321 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2322
2323 armnn::TensorInfo inputDesc({2, 2}, datatype);
2324 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2325 std::vector<float>{2., 3., 3., 4.}));
2326
2327 armnn::TensorInfo outputDesc({2, 4}, datatype);
2328 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2329 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2330 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2331
2332 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2333 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2334
2335}
2336
2337LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2338 armnn::IWorkloadFactory& workloadFactory,
2339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2340{
2341 const float qScale = 1.0f;
2342 const int32_t qOffset = 0;
2343
2344 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2345 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2346
2347 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2348 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2349 std::vector<float>({ 2., 3., 3., 4. })));
2350
2351 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2352 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2353 qOffset, std::vector<float>(
2354 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2355 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2356
2357 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2358 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2359}
2360
2361LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2362 armnn::IWorkloadFactory& workloadFactory,
2363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2364{
2365 const float qScale = 2.0f;
2366 const int32_t qOffset = 0;
2367
2368 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2369 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2370
2371 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2372 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2373 qOffset, std::vector<float>(
2374 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2375 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2376
2377 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2378 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2379 qOffset, std::vector<float>(
2380 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2381 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2382 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2383 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2384 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2385 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
2386
2387 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2388 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2389}
2390
2391LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2392 armnn::IWorkloadFactory& workloadFactory,
2393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2394{
2395 const float qScale = 1.0f;
2396 const int32_t qOffset = 0;
2397
2398 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2399
2400 armnn::TensorInfo inputDesc({2, 2}, datatype);
2401 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2402 qOffset, std::vector<float>{2., 3., 3., 4.}));
2403
2404 armnn::TensorInfo outputDesc({2, 4}, datatype);
2405 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2406 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2407 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2408
2409 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2410 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2411}
2412
Jim Flynn4ed6c832019-05-20 11:02:46 +01002413LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002414 armnn::IWorkloadFactory& workloadFactory,
2415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002416{
surmeh013537c2c2018-05-18 16:31:43 +01002417 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00002418 unsigned int outputHeight = 6;
2419 unsigned int outputChannels = 3;
2420
surmeh013537c2c2018-05-18 16:31:43 +01002421 unsigned int inputWidth1 = 3;
2422 unsigned int inputHeight1 = 6;
2423 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00002424
surmeh013537c2c2018-05-18 16:31:43 +01002425 unsigned int inputWidth2 = 3;
2426 unsigned int inputHeight2 = 6;
2427 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00002428
telsoa01c577f2c2018-08-31 09:22:23 +01002429 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00002430 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
2431 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
2432 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00002433
2434 LayerTestResult<float,3> ret(outputTensorInfo);
2435
telsoa014fcda012018-03-09 14:13:49 +00002436 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01002437 {
2438 1.0f, 2.0f, 3.0f,
2439 4.0f, 5.0f, 6.0f,
2440 7.0f, 8.0f, 9.0f,
2441 10.0f, 11.0f, 12.0f,
2442 13.0f, 14.0f, 15.0f,
2443 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00002444
surmeh013537c2c2018-05-18 16:31:43 +01002445 19.0f, 20.0f, 21.0f,
2446 22.0f, 23.0f, 24.0f,
2447 25.0f, 26.0f, 27.0f,
2448 28.0f, 29.0f, 30.0f,
2449 31.0f, 32.0f, 33.0f,
2450 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00002451
surmeh013537c2c2018-05-18 16:31:43 +01002452 37.0f, 38.0f, 39.0f,
2453 40.0f, 41.0f, 42.0f,
2454 43.0f, 44.0f, 45.0f,
2455 46.0f, 47.0f, 48.0f,
2456 49.0f, 50.0f, 51.0f,
2457 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00002458 })
2459 );
2460
telsoa014fcda012018-03-09 14:13:49 +00002461 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2462 {
surmeh013537c2c2018-05-18 16:31:43 +01002463 1.0f, 2.0f, 3.0f,
2464 4.0f, 5.0f, 6.0f,
2465 7.0f, 8.0f, 9.0f,
2466 10.0f, 11.0f, 12.0f,
2467 13.0f, 14.0f, 15.0f,
2468 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00002469
surmeh013537c2c2018-05-18 16:31:43 +01002470 19.0f, 20.0f, 21.0f,
2471 22.0f, 23.0f, 24.0f,
2472 25.0f, 26.0f, 27.0f,
2473 28.0f, 29.0f, 30.0f,
2474 31.0f, 32.0f, 33.0f,
2475 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00002476 })
2477 );
2478
2479 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2480 {
surmeh013537c2c2018-05-18 16:31:43 +01002481 37.0f, 38.0f, 39.0f,
2482 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00002483 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01002484 46.0f, 47.0f, 48.0f,
2485 49.0f, 50.0f, 51.0f,
2486 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00002487 })
2488 );
2489
telsoa01c577f2c2018-08-31 09:22:23 +01002490 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01002491 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00002492
telsoa01c577f2c2018-08-31 09:22:23 +01002493 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01002494 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00002495
telsoa014fcda012018-03-09 14:13:49 +00002496 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2497
2498 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2499
2500 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
2501 subTensorsSupported ?
2502 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2503 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2504
2505 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
2506 subTensorsSupported ?
2507 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2508 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2509
Jim Flynne242f2d2019-05-22 14:24:13 +01002510 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00002511 armnn::WorkloadInfo info;
2512 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2513 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00002514 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2515
2516 data.m_ViewOrigins.push_back(window1);
2517 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00002518
Jim Flynn4ed6c832019-05-20 11:02:46 +01002519 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00002520
2521 inputHandle1->Allocate();
2522 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00002523 outputHandle->Allocate();
2524
2525 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2526 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00002527
Derek Lambertif30f7d32019-04-09 10:25:02 +01002528 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002529 workload->Execute();
2530
2531 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2532
2533 return ret;
2534}
2535
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002536LayerTestResult<float,4> AdditionTest(
2537 armnn::IWorkloadFactory& workloadFactory,
2538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002539{
2540 unsigned int batchSize = 2;
2541 unsigned int channels = 2;
2542 unsigned int height = 2;
2543 unsigned int width = 3;
2544
2545 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2546 armnn::TensorInfo outputTensorInfo;
2547
2548 unsigned int shape[] = {batchSize, channels, height, width};
2549
2550 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2551 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2552 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2553
2554
2555 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2556 {
2557 0.0f, 2.0f, 1.0f,
2558 0.2f, 1.0f, 2.0f,
2559
2560 1.0f, 2.0f, 1.0f,
2561 0.2f, 1.0f, 2.0f,
2562
2563 0.0f, 2.0f, 1.0f,
2564 4.2f, 1.0f, 2.0f,
2565
2566 0.0f, 0.0f, 1.0f,
2567 0.2f, 1.0f, 2.0f,
2568 }));
2569
2570 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2571 {
2572 1.0f, 2.0f, 1.0f,
2573 0.0f, 1.0f, 2.0f,
2574
2575 1.0f, 2.0f, -2.0f,
2576 0.2f, 1.0f, 2.0f,
2577
2578 0.0f, 2.0f, 1.0f,
2579 4.2f, 0.0f, -3.0f,
2580
2581 0.0f, 0.0f, 1.0f,
2582 0.7f, 1.0f, 5.0f,
2583 }));
2584
2585 LayerTestResult<float,4> ret(outputTensorInfo);
2586 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2587 {
2588 1.0f, 4.0f, 2.0f,
2589 0.2f, 2.0f, 4.0f,
2590
2591 2.0f, 4.0f, -1.0f,
2592 0.4f, 2.0f, 4.0f,
2593
2594 0.0f, 4.0f, 2.0f,
2595 8.4f, 1.0f, -1.0f,
2596
2597 0.0f, 0.0f, 2.0f,
2598 0.9f, 2.0f, 7.0f,
2599 }));
2600
2601 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2602 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2603 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2604
2605 armnn::AdditionQueueDescriptor data;
2606 armnn::WorkloadInfo info;
2607 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2608 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2609 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2610
2611 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2612
2613 inputHandle1->Allocate();
2614 inputHandle2->Allocate();
2615 outputHandle->Allocate();
2616
2617 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2618 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2619
Derek Lambertif30f7d32019-04-09 10:25:02 +01002620 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002621 workload->Execute();
2622
2623 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2624
2625 return ret;
2626}
2627
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002628template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002629LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2630 armnn::IWorkloadFactory& workloadFactory,
2631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002632 float qScale,
2633 int32_t qOffset)
2634{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002635 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2636 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2637 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002638
2639 if (armnn::IsQuantizedType<T>())
2640 {
2641 inputTensorInfo1.SetQuantizationScale(qScale);
2642 inputTensorInfo1.SetQuantizationOffset(qOffset);
2643 inputTensorInfo2.SetQuantizationScale(qScale);
2644 inputTensorInfo2.SetQuantizationOffset(qOffset);
2645 outputTensorInfo.SetQuantizationScale(qScale);
2646 outputTensorInfo.SetQuantizationOffset(qOffset);
2647 }
2648
2649 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2650 {
2651 0.0f,
2652 1.0f,
2653
2654 2.0f,
2655 3.0f,
2656
2657 4.0f,
2658 5.0f,
2659 }));
2660
2661 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2662 {
2663 0.5f, 1.5f, 2.5f,
2664 3.5f, 4.5f, 5.5f,
2665 }));
2666
2667 LayerTestResult<T,4> ret(outputTensorInfo);
2668 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2669 {
2670 0.5f, 1.5f, 2.5f,
2671 4.5f, 5.5f, 6.5f,
2672
2673 2.5f, 3.5f, 4.5f,
2674 6.5f, 7.5f, 8.5f,
2675
2676 4.5f, 5.5f, 6.5f,
2677 8.5f, 9.5f, 10.5f,
2678 }));
2679
2680 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2681 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2682 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2683
2684 armnn::AdditionQueueDescriptor data;
2685 armnn::WorkloadInfo info;
2686 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2687 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2688 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2689
2690 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2691
2692 inputHandle1->Allocate();
2693 inputHandle2->Allocate();
2694 outputHandle->Allocate();
2695
2696 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2697 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2698
Derek Lambertif30f7d32019-04-09 10:25:02 +01002699 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002700 workload->Execute();
2701
2702 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2703
2704 return ret;
2705}
2706
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002707template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002708LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2709 armnn::IWorkloadFactory& workloadFactory,
2710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002711 float qScale,
2712 int32_t qOffset)
2713{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002714 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2715 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2716 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002717
2718 if (armnn::IsQuantizedType<T>())
2719 {
2720 inputTensorInfo1.SetQuantizationScale(qScale);
2721 inputTensorInfo1.SetQuantizationOffset(qOffset);
2722 inputTensorInfo2.SetQuantizationScale(qScale);
2723 inputTensorInfo2.SetQuantizationOffset(qOffset);
2724 outputTensorInfo.SetQuantizationScale(qScale);
2725 outputTensorInfo.SetQuantizationOffset(qOffset);
2726 }
2727
2728 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2729 {
2730 0.0f, 1.0f, 2.0f,
2731 3.0f, 4.0f, 5.0f,
2732 6.0f, 7.0f, 8.0f,
2733 9.0f, 10.0f, 11.0f,
2734 12.0f, 13.0f, 14.0f,
2735 15.0f, 16.0f, 17.0f,
2736 }));
2737
2738 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2739 {
2740 0.5f,
2741 }));
2742
2743 LayerTestResult<T,4> ret(outputTensorInfo);
2744 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2745 {
2746 0.5f, 1.5f, 2.5f,
2747 3.5f, 4.5f, 5.5f,
2748 6.5f, 7.5f, 8.5f,
2749 9.5f, 10.5f, 11.5f,
2750 12.5f, 13.5f, 14.5f,
2751 15.5f, 16.5f, 17.5f,
2752 }));
2753
2754 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2755 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2756 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2757
2758 armnn::AdditionQueueDescriptor data;
2759 armnn::WorkloadInfo info;
2760 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2761 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2762 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2763
2764 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2765
2766 inputHandle1->Allocate();
2767 inputHandle2->Allocate();
2768 outputHandle->Allocate();
2769
2770 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2771 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2772
Derek Lambertif30f7d32019-04-09 10:25:02 +01002773 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002774 workload->Execute();
2775
2776 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2777
2778 return ret;
2779}
2780
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002781LayerTestResult<float, 4> AdditionBroadcastTest(
2782 armnn::IWorkloadFactory& workloadFactory,
2783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002784{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002785 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2786 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002787}
2788
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002789LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2790 armnn::IWorkloadFactory& workloadFactory,
2791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002792{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002793 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2794 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002795}
2796
Sadik Armagan2999a022019-04-09 14:20:12 +01002797LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2798 armnn::IWorkloadFactory& workloadFactory,
2799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2800{
2801 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2802 workloadFactory, memoryManager, 2.f, 0);
2803}
2804
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002805LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2806 armnn::IWorkloadFactory& workloadFactory,
2807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002808{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002809 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2810 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002811}
2812
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002813LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2814 armnn::IWorkloadFactory& workloadFactory,
2815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002816{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002817 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2818 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002819}
2820
Sadik Armagan2999a022019-04-09 14:20:12 +01002821LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2822 armnn::IWorkloadFactory& workloadFactory,
2823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2824{
2825 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2826 workloadFactory, memoryManager, 0.1333333f, 0);
2827}
2828
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002829LayerTestResult<float,4> CompareAdditionTest(
2830 armnn::IWorkloadFactory& workloadFactory,
2831 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2832 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002833{
2834 unsigned int batchSize = 4;
2835 unsigned int channels = 1;
2836 unsigned int height = 2;
2837 unsigned int width = 3;
2838
2839 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2840 armnn::TensorInfo outputTensorInfo;
2841
2842 unsigned int shape[] = {batchSize, channels, height, width};
2843
2844 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2845 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2846 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2847
2848 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2849 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2850
2851 LayerTestResult<float,4> ret(outputTensorInfo);
2852
2853 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2854 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2855 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2856
2857 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2858 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2859 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2860
2861 armnn::AdditionQueueDescriptor data;
2862 armnn::WorkloadInfo info;
2863 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2864 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2865 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2866
2867 armnn::AdditionQueueDescriptor refData = data;
2868 armnn::WorkloadInfo refInfo = info;
2869 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2870 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2871 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2872
2873 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2874 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2875
2876 inputHandle1->Allocate();
2877 inputHandle2->Allocate();
2878 outputHandle->Allocate();
2879 inputHandle1Ref->Allocate();
2880 inputHandle2Ref->Allocate();
2881 outputHandleRef->Allocate();
2882
2883 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2884 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2885 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2886 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2887
Derek Lambertif30f7d32019-04-09 10:25:02 +01002888 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002889 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002890 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002891 workloadRef->Execute();
2892
2893 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2894 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2895
2896 return ret;
2897}
2898
surmeh01bceff2f2018-03-29 16:29:27 +01002899namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002900template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002901LayerTestResult<T, 4> DivisionTestHelper(
2902 armnn::IWorkloadFactory& workloadFactory,
2903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2904 const unsigned int shape0[4],
2905 const std::vector<T>& values0,
2906 float scale0,
2907 int32_t offset0,
2908 const unsigned int shape1[4],
2909 const std::vector<T> & values1,
2910 float scale1,
2911 int32_t offset1,
2912 const unsigned int outShape[4],
2913 const std::vector<T> & outValues,
2914 float outScale,
2915 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002916{
Sadik Armagan2999a022019-04-09 14:20:12 +01002917 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2918 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2919 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002920
David Beck5cd01f32018-09-12 16:00:08 +01002921 inputTensorInfo0.SetQuantizationScale(scale0);
2922 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002923
David Beck5cd01f32018-09-12 16:00:08 +01002924 inputTensorInfo1.SetQuantizationScale(scale1);
2925 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002926
David Beck5cd01f32018-09-12 16:00:08 +01002927 outputTensorInfo.SetQuantizationScale(outScale);
2928 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002929
David Beck5cd01f32018-09-12 16:00:08 +01002930 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2931 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002932
David Beck5cd01f32018-09-12 16:00:08 +01002933 LayerTestResult<T, 4> result(outputTensorInfo);
2934 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002935
David Beck5cd01f32018-09-12 16:00:08 +01002936 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2937 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2938 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002939
David Beck5cd01f32018-09-12 16:00:08 +01002940 armnn::DivisionQueueDescriptor data;
2941 armnn::WorkloadInfo info;
2942 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2943 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2944 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002945
David Beck5cd01f32018-09-12 16:00:08 +01002946 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002947
David Beck5cd01f32018-09-12 16:00:08 +01002948 inputHandle0->Allocate();
2949 inputHandle1->Allocate();
2950 outputHandle->Allocate();
2951
2952 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2953 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2954
Derek Lambertif30f7d32019-04-09 10:25:02 +01002955 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002956 workload->Execute();
2957
2958 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2959
2960 return result;
2961}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002962} // anonymous namespace
2963
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002964LayerTestResult<float,4> DivisionByZeroTest(
2965 armnn::IWorkloadFactory& workloadFactory,
2966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002967{
2968 const unsigned int width = 2;
2969 const unsigned int height = 2;
2970 const unsigned int channelCount = 2;
2971 const unsigned int batchSize = 2;
2972
2973 unsigned int shape[] = { batchSize, channelCount, height, width };
2974
2975 std::vector<float> input0({
2976 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2977 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2978
2979 std::vector<float> input1({
2980 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2981 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2982
2983 std::vector<float> output({
2984 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2985 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2986
Sadik Armagan2999a022019-04-09 14:20:12 +01002987 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2988 memoryManager,
2989 shape, input0, 1.0f, 0,
2990 shape, input1, 1.0f, 0,
2991 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002992}
2993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002994LayerTestResult<float,4> DivisionTest(
2995 armnn::IWorkloadFactory& workloadFactory,
2996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002997{
2998 const unsigned int width = 2;
2999 const unsigned int height = 2;
3000 const unsigned int channelCount = 2;
3001 const unsigned int batchSize = 2;
3002
3003 unsigned int shape[] = { batchSize, channelCount, height, width };
3004
3005 std::vector<float> input0({
3006 2, 2, 2, 2, 3, 3, 3, 3,
3007 4, 4, 4, 4, 5, 5, 5, 5 });
3008
3009 std::vector<float> input1({
3010 1, 1, 1, 1, 2, 2, 2, 2,
3011 4, 4, 4, 4, 4, 4, 4, 4 });
3012
3013 std::vector<float> output({
3014 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
3015 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
3016
David Beck5cd01f32018-09-12 16:00:08 +01003017
Sadik Armagan2999a022019-04-09 14:20:12 +01003018 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3019 memoryManager,
3020 shape, input0, 1.0f, 0,
3021 shape, input1, 1.0f, 0,
3022 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003023}
3024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003025LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3026 armnn::IWorkloadFactory& workloadFactory,
3027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003028{
3029 unsigned int shape0[] = { 1, 2, 2, 2 };
3030 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3031
3032 unsigned int shape1[] = { 1, 1, 1, 1 };
3033 std::vector<float> input1({ 2 });
3034
3035 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3036
David Beck5cd01f32018-09-12 16:00:08 +01003037
Sadik Armagan2999a022019-04-09 14:20:12 +01003038 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3039 memoryManager,
3040 shape0, input0, 1.0f, 0,
3041 shape1, input1, 1.0f, 0,
3042 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003043}
3044
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003045LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3046 armnn::IWorkloadFactory& workloadFactory,
3047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003048{
3049 unsigned int shape0[] = { 1, 3, 3, 2 };
3050 std::vector<float> input0({
3051 1, 4, 3, 8, 5, 12,
3052 7, 16, 9, 20, 11, 24,
3053 13, 28, 15, 32, 17, 36});
3054
3055 unsigned int shape1[] = { 1, 1, 1, 2 };
3056 std::vector<float> input1({ 1, 2 });
3057
3058 std::vector<float> output({
3059 1, 2, 3, 4, 5, 6,
3060 7, 8, 9, 10, 11, 12,
3061 13, 14, 15, 16, 17, 18});
3062
Sadik Armagan2999a022019-04-09 14:20:12 +01003063 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3064 memoryManager,
3065 shape0, input0, 1.0f, 0,
3066 shape1, input1, 1.0f, 0,
3067 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003068}
3069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003070LayerTestResult<uint8_t,4> DivisionUint8Test(
3071 armnn::IWorkloadFactory& workloadFactory,
3072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003073{
3074 const unsigned int width = 2;
3075 const unsigned int height = 2;
3076 const unsigned int channelCount = 2;
3077 const unsigned int batchSize = 2;
3078
3079 unsigned int shape[] = { batchSize, channelCount, height, width };
3080
3081 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3082 4, 4, 4, 4, 5, 5, 5, 5 });
3083
3084 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3085 4, 4, 4, 4, 4, 4, 4, 4 });
3086
3087 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3088 4, 4, 4, 4, 5, 5, 5, 5});
3089
3090
Sadik Armagan2999a022019-04-09 14:20:12 +01003091 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3092 memoryManager,
3093 shape, input0, 1.0f, 0,
3094 shape, input1, 1.0f, 0,
3095 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003096}
3097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003098LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3099 armnn::IWorkloadFactory& workloadFactory,
3100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003101{
3102 unsigned int shape0[] = { 1, 2, 2, 2 };
3103 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3104
3105 unsigned int shape1[] = { 1, 1, 1, 1 };
3106 std::vector<uint8_t> input1({ 2 });
3107
3108 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3109
Sadik Armagan2999a022019-04-09 14:20:12 +01003110 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3111 memoryManager,
3112 shape0, input0, 1.0f, 0,
3113 shape1, input1, 1.0f, 0,
3114 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003115}
3116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003117LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3118 armnn::IWorkloadFactory& workloadFactory,
3119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003120{
3121 unsigned int shape0[] = { 1, 3, 3, 2 };
3122 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
3123 7, 16, 9, 20, 11, 24,
3124 13, 28, 15, 32, 17, 36});
3125
3126 unsigned int shape1[] = { 1, 1, 1, 2 };
3127 std::vector<uint8_t> input1({ 1, 2 });
3128
3129 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
3130 7, 8, 9, 10, 11, 12,
3131 13, 14, 15, 16, 17, 18});
3132
Sadik Armagan2999a022019-04-09 14:20:12 +01003133 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3134 memoryManager,
3135 shape0, input0, 1.0f, 0,
3136 shape1, input1, 1.0f, 0,
3137 shape0, output, 1.0f, 0);
3138}
3139
3140LayerTestResult<int16_t,4> DivisionInt16Test(
3141 armnn::IWorkloadFactory& workloadFactory,
3142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3143{
3144 unsigned int shape[] = { 2, 2, 2, 2 };
3145
3146 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3147 4, 4, 4, 4, 5, 5, 5, 5 });
3148
3149 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3150 4, 4, 4, 4, 4, 4, 4, 4 });
3151
3152 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3153 4, 4, 4, 4, 5, 5, 5, 5});
3154
3155
3156 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3157 memoryManager,
3158 shape, input0, 1.0f, 0,
3159 shape, input1, 1.0f, 0,
3160 shape, output, 0.25f, 0);
3161}
3162
3163LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3164 armnn::IWorkloadFactory& workloadFactory,
3165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3166{
3167 unsigned int shape0[] = { 1, 2, 2, 2 };
3168 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3169
3170 unsigned int shape1[] = { 1, 1, 1, 1 };
3171 std::vector<int16_t> input1({ 2 });
3172
3173 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3174
3175 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3176 memoryManager,
3177 shape0, input0, 1.0f, 0,
3178 shape1, input1, 1.0f, 0,
3179 shape0, output, 1.0f, 0);
3180}
3181
3182LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3183 armnn::IWorkloadFactory& workloadFactory,
3184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3185{
3186 unsigned int shape0[] = { 1, 3, 3, 2 };
3187 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
3188 7, 16, 9, 20, 11, 24,
3189 13, 28, 15, 32, 17, 36});
3190
3191 unsigned int shape1[] = { 1, 1, 1, 2 };
3192 std::vector<int16_t> input1({ 1, 2 });
3193
3194 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
3195 7, 8, 9, 10, 11, 12,
3196 13, 14, 15, 16, 17, 18});
3197
3198 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3199 memoryManager,
3200 shape0, input0, 1.0f, 0,
3201 shape1, input1, 1.0f, 0,
3202 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003203}
3204
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003205template<typename DescriptorType>
3206std::unique_ptr<armnn::IWorkload> CreateWorkload(
3207 const armnn::IWorkloadFactory& workloadFactory,
3208 const armnn::WorkloadInfo& info,
3209 const DescriptorType& descriptor)
3210{
3211 return CreateWorkload(workloadFactory, info, descriptor);
3212};
3213
// Specialisation: Maximum workloads are created via the dedicated factory
// method.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
3222
// Specialisation: Minimum workloads are created via the dedicated factory
// method.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
3231
// Specialisation: Equal workloads are created via the dedicated factory
// method.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
3240
// Specialisation: Greater workloads are created via the dedicated factory
// method.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
3249
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003250namespace {
kevmay012b4d88e2019-01-24 14:05:09 +00003251
3252template <typename Descriptor,
3253 armnn::DataType ArmnnTypeInput,
3254 armnn::DataType ArmnnTypeOutput,
3255 typename TInput = armnn::ResolveType<ArmnnTypeInput>,
3256 typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
3257LayerTestResult<TOutput, 4> ElementwiseTestHelper(
3258 armnn::IWorkloadFactory & workloadFactory,
3259 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3260 const unsigned int shape0[4], std::vector<TInput> values0,
3261 const unsigned int shape1[4], std::vector<TInput> values1,
3262 const unsigned int outShape[4], std::vector<TOutput> outValues,
3263 float qScale = 0.0f, int qOffset = 0)
3264{
Rob Hughes9e10c2b2019-07-23 15:37:19 +01003265 const uint32_t dimensionCount = 4;
kevmay012b4d88e2019-01-24 14:05:09 +00003266 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
3267 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
3268 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
3269
3270 auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
3271 auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
3272
3273 if (armnn::IsQuantizedType<TInput>())
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003274 {
kevmay012b4d88e2019-01-24 14:05:09 +00003275 inputTensorInfo0.SetQuantizationScale(qScale);
3276 inputTensorInfo0.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003277
kevmay012b4d88e2019-01-24 14:05:09 +00003278 inputTensorInfo1.SetQuantizationScale(qScale);
3279 inputTensorInfo1.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003280
kevmay012b4d88e2019-01-24 14:05:09 +00003281 outputTensorInfo.SetQuantizationScale(qScale);
3282 outputTensorInfo.SetQuantizationOffset(qOffset);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003283 }
kevmay012b4d88e2019-01-24 14:05:09 +00003284
3285 LayerTestResult<TOutput,4> ret(outputTensorInfo);
3286
3287 if(ArmnnTypeOutput == armnn::DataType::Boolean)
3288 {
3289 ret.compareBoolean = true;
3290 }
3291
3292 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3293 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3294 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3295
3296 Descriptor data;
3297 armnn::WorkloadInfo info;
3298 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3299 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3300 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3301 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
3302
3303 inputHandle0->Allocate();
3304 inputHandle1->Allocate();
3305 outputHandle->Allocate();
3306
3307 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3308 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3309
Derek Lambertif30f7d32019-04-09 10:25:02 +01003310 workload->PostAllocationConfigure();
kevmay012b4d88e2019-01-24 14:05:09 +00003311 ExecuteWorkload(*workload, memoryManager);
3312
3313 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3314
3315 ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
3316 return ret;
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003317}
3318
kevmay012b4d88e2019-01-24 14:05:09 +00003319template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
3320LayerTestResult<T, 4> ElementwiseTestHelper(
3321 armnn::IWorkloadFactory & workloadFactory,
3322 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
3323 const unsigned int shape0[4], std::vector<T> values0,
3324 const unsigned int shape1[4], std::vector<T> values1,
3325 const unsigned int outShape[4], std::vector<T> outValues,
3326 float qScale = 0.0f, int qOffset = 0)
3327{
3328 return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
3329 (workloadFactory,
3330 memoryManager,
3331 shape0,
3332 values0,
3333 shape1,
3334 values1,
3335 outShape,
3336 outValues,
3337 qScale,
3338 qOffset);
3339}
3340}
3341
3342LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003344{
3345 const unsigned int width = 2;
3346 const unsigned int height = 2;
3347 const unsigned int channelCount = 2;
3348 const unsigned int batchSize = 2;
3349
3350 unsigned int shape[] = { batchSize, channelCount, height, width };
3351
3352 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3353 3, 3, 3, 3, 4, 4, 4, 4 });
3354
3355 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3356 5, 5, 5, 5, 4, 4, 4, 4 });
3357
kevmay012b4d88e2019-01-24 14:05:09 +00003358 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3359 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003360
kevmay012b4d88e2019-01-24 14:05:09 +00003361 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003362 workloadFactory,
3363 memoryManager,
3364 shape,
3365 input0,
3366 shape,
3367 input1,
3368 shape,
3369 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003370}
3371
kevmay012b4d88e2019-01-24 14:05:09 +00003372LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003373 armnn::IWorkloadFactory& workloadFactory,
3374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3375{
3376 unsigned int shape0[] = { 1, 2, 2, 2 };
3377 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3378
3379 unsigned int shape1[] = { 1, 1, 1, 1 };
3380 std::vector<float> input1({ 1 });
3381
kevmay012b4d88e2019-01-24 14:05:09 +00003382 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003383
kevmay012b4d88e2019-01-24 14:05:09 +00003384 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003385 workloadFactory,
3386 memoryManager,
3387 shape0,
3388 input0,
3389 shape1,
3390 input1,
3391 shape0,
3392 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003393}
3394
kevmay012b4d88e2019-01-24 14:05:09 +00003395LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003396 armnn::IWorkloadFactory& workloadFactory,
3397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3398{
3399 const unsigned int shape0[] = { 1, 2, 2, 3 };
3400 const unsigned int shape1[] = { 1, 1, 1, 3 };
3401
3402 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3403 7, 8, 9, 10, 11, 12 });
3404
3405 std::vector<float> input1({ 1, 2, 3});
3406
kevmay012b4d88e2019-01-24 14:05:09 +00003407 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3408 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003409
kevmay012b4d88e2019-01-24 14:05:09 +00003410 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003411 workloadFactory,
3412 memoryManager,
3413 shape0,
3414 input0,
3415 shape1,
3416 input1,
3417 shape0,
3418 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003419}
3420
3421LayerTestResult<uint8_t, 4> EqualUint8Test(
3422 armnn::IWorkloadFactory& workloadFactory,
3423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3424{
3425 unsigned int shape[] = { 2, 2, 2, 2 };
3426
3427 // See dequantized values to the right.
3428 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003429 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003430
3431 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3432 3, 3, 3, 3, 5, 5, 5, 5 });
3433
3434 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3435 1, 1, 1, 1, 0, 0, 0, 0 });
3436
kevmay012b4d88e2019-01-24 14:05:09 +00003437 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3438 armnn::DataType::QuantisedAsymm8,
3439 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003440 workloadFactory,
3441 memoryManager,
3442 shape,
3443 input0,
3444 shape,
3445 input1,
3446 shape,
3447 output,
3448 1.0f,
3449 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003450}
3451
3452LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3453 armnn::IWorkloadFactory& workloadFactory,
3454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3455{
3456 const unsigned int shape0[] = { 1, 2, 2, 3 };
3457 const unsigned int shape1[] = { 1, 1, 1, 1 };
3458
3459 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3460 7, 8, 9, 10, 11, 12 });
3461
3462 std::vector<uint8_t> input1({ 1 });
3463
3464 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3465 0, 0, 0, 0, 0, 0 });
3466
kevmay012b4d88e2019-01-24 14:05:09 +00003467 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3468 armnn::DataType::QuantisedAsymm8,
3469 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003470 workloadFactory,
3471 memoryManager,
3472 shape0,
3473 input0,
3474 shape1,
3475 input1,
3476 shape0,
3477 output,
3478 1.0f,
3479 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003480}
3481
3482LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3483 armnn::IWorkloadFactory& workloadFactory,
3484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3485{
3486 const unsigned int shape0[] = { 1, 2, 2, 3 };
3487 const unsigned int shape1[] = { 1, 1, 1, 3 };
3488
3489 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3490 7, 8, 9, 10, 11, 12 });
3491
3492 std::vector<uint8_t> input1({ 1, 1, 3});
3493
3494 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3495 0, 0, 0, 0, 0, 0 });
3496
kevmay012b4d88e2019-01-24 14:05:09 +00003497 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3498 armnn::DataType::QuantisedAsymm8,
3499 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003500 workloadFactory,
3501 memoryManager,
3502 shape0,
3503 input0,
3504 shape1,
3505 input1,
3506 shape0,
3507 output,
3508 1.0f,
3509 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003510}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003511
kevmay012b4d88e2019-01-24 14:05:09 +00003512LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3514{
3515 const unsigned int width = 2;
3516 const unsigned int height = 2;
3517 const unsigned int channelCount = 2;
3518 const unsigned int batchSize = 2;
3519
3520 unsigned int shape[] = { batchSize, channelCount, height, width };
3521
3522 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3523 3, 3, 3, 3, 4, 4, 4, 4 });
3524
3525 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3526 5, 5, 5, 5, 4, 4, 4, 4 });
3527
kevmay012b4d88e2019-01-24 14:05:09 +00003528 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3529 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003530
kevmay012b4d88e2019-01-24 14:05:09 +00003531 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003532 workloadFactory,
3533 memoryManager,
3534 shape,
3535 input0,
3536 shape,
3537 input1,
3538 shape,
3539 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003540}
3541
kevmay012b4d88e2019-01-24 14:05:09 +00003542LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003543 armnn::IWorkloadFactory& workloadFactory,
3544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3545{
3546 unsigned int shape0[] = { 1, 2, 2, 2 };
3547 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3548
3549 unsigned int shape1[] = { 1, 1, 1, 1 };
3550 std::vector<float> input1({ 1 });
3551
kevmay012b4d88e2019-01-24 14:05:09 +00003552 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003553
kevmay012b4d88e2019-01-24 14:05:09 +00003554 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003555 workloadFactory,
3556 memoryManager,
3557 shape0,
3558 input0,
3559 shape1,
3560 input1,
3561 shape0,
3562 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003563}
3564
kevmay012b4d88e2019-01-24 14:05:09 +00003565LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003566 armnn::IWorkloadFactory& workloadFactory,
3567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3568{
3569 const unsigned int shape0[] = { 1, 2, 2, 3 };
3570 const unsigned int shape1[] = { 1, 1, 1, 3 };
3571
3572 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3573 7, 8, 9, 10, 11, 12 });
3574
3575 std::vector<float> input1({ 1, 3, 2});
3576
kevmay012b4d88e2019-01-24 14:05:09 +00003577 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3578 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003579
kevmay012b4d88e2019-01-24 14:05:09 +00003580 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003581 workloadFactory,
3582 memoryManager,
3583 shape0,
3584 input0,
3585 shape1,
3586 input1,
3587 shape0,
3588 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003589}
3590
3591LayerTestResult<uint8_t, 4> GreaterUint8Test(
3592 armnn::IWorkloadFactory& workloadFactory,
3593 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3594{
3595 unsigned int shape[] = { 2, 2, 2, 2 };
3596
3597 // See dequantized values to the right.
3598 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3599 3, 3, 3, 3, 5, 5, 5, 5 });
3600
3601 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3602 2, 2, 2, 2, 5, 5, 5, 5 });
3603
3604 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3605 1, 1, 1, 1, 0, 0, 0, 0 });
3606
kevmay012b4d88e2019-01-24 14:05:09 +00003607 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3608 armnn::DataType::QuantisedAsymm8,
3609 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003610 workloadFactory,
3611 memoryManager,
3612 shape,
3613 input0,
3614 shape,
3615 input1,
3616 shape,
3617 output,
3618 1.0f,
3619 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003620}
3621
3622LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3623 armnn::IWorkloadFactory& workloadFactory,
3624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3625{
3626 const unsigned int shape0[] = { 1, 2, 2, 3 };
3627 const unsigned int shape1[] = { 1, 1, 1, 1 };
3628
3629 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3630 7, 8, 9, 10, 11, 12 });
3631
3632 std::vector<uint8_t> input1({ 1 });
3633
3634 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3635 1, 1, 1, 1, 1, 1 });
3636
kevmay012b4d88e2019-01-24 14:05:09 +00003637 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3638 armnn::DataType::QuantisedAsymm8,
3639 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003640 workloadFactory,
3641 memoryManager,
3642 shape0,
3643 input0,
3644 shape1,
3645 input1,
3646 shape0,
3647 output,
3648 1.0f,
3649 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003650}
3651
3652LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3653 armnn::IWorkloadFactory& workloadFactory,
3654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3655{
3656 const unsigned int shape0[] = { 1, 2, 2, 3 };
3657 const unsigned int shape1[] = { 1, 1, 1, 3 };
3658
3659 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3660 7, 8, 9, 10, 11, 12 });
3661
3662 std::vector<uint8_t> input1({ 1, 1, 3});
3663
3664 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3665 1, 1, 1, 1, 1, 1 });
3666
kevmay012b4d88e2019-01-24 14:05:09 +00003667 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3668 armnn::DataType::QuantisedAsymm8,
3669 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003670 workloadFactory,
3671 memoryManager,
3672 shape0,
3673 input0,
3674 shape1,
3675 input1,
3676 shape0,
3677 output,
3678 1.0f,
3679 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003680}
3681
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003682LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3683 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3684{
3685 const unsigned int width = 2;
3686 const unsigned int height = 2;
3687 const unsigned int channelCount = 2;
3688 const unsigned int batchSize = 2;
3689
3690 unsigned int shape[] = { batchSize, channelCount, height, width };
3691
3692 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3693 3, 3, 3, 3, 4, 4, 4, 4 });
3694
3695 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3696 4, 4, 4, 4, 5, 5, 5, 5 });
3697
3698 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3699 4, 4, 4, 4, 5, 5, 5, 5 });
3700
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003701 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3702 workloadFactory,
3703 memoryManager,
3704 shape,
3705 input0,
3706 shape,
3707 input1,
3708 shape,
3709 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003710}
3711
3712LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3713 armnn::IWorkloadFactory& workloadFactory,
3714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3715{
3716 unsigned int shape0[] = { 1, 2, 2, 2 };
3717 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3718
3719 unsigned int shape1[] = { 1, 1, 1, 1 };
3720 std::vector<float> input1({ 2 });
3721
3722 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3723
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003724 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3725 workloadFactory,
3726 memoryManager,
3727 shape0,
3728 input0,
3729 shape1,
3730 input1,
3731 shape0,
3732 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003733}
3734
3735LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3736 armnn::IWorkloadFactory& workloadFactory,
3737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3738{
3739 const unsigned int shape0[] = { 1, 2, 2, 3 };
3740 const unsigned int shape1[] = { 1, 1, 1, 3 };
3741
3742 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3743 7, 8, 9, 10, 11, 12 });
3744
3745 std::vector<float> input1({ 1, 2, 3});
3746
3747 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003748 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003749
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003750 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3751 workloadFactory,
3752 memoryManager,
3753 shape0,
3754 input0,
3755 shape1,
3756 input1,
3757 shape0,
3758 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003759}
3760
3761LayerTestResult<uint8_t, 4> MaximumUint8Test(
3762 armnn::IWorkloadFactory& workloadFactory,
3763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3764{
3765 unsigned int shape[] = { 2, 2, 2, 2 };
3766
3767 // See dequantized values to the right.
3768 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3769 3, 3, 3, 3, 4, 4, 4, 4 });
3770
3771 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3772 4, 4, 4, 4, 5, 5, 5, 5 });
3773
3774 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3775 4, 4, 4, 4, 5, 5, 5, 5 });
3776
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003777 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3778 workloadFactory,
3779 memoryManager,
3780 shape,
3781 input0,
3782 shape,
3783 input1,
3784 shape,
3785 output,
3786 1.0f,
3787 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003788}
3789
3790LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3791 armnn::IWorkloadFactory& workloadFactory,
3792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3793{
3794 const unsigned int shape0[] = { 1, 2, 2, 3 };
3795 const unsigned int shape1[] = { 1, 1, 1, 1 };
3796
3797 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3798 7, 8, 9, 10, 11, 12 });
3799
3800 std::vector<uint8_t> input1({2});
3801
3802 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3803 7, 8, 9, 10, 11, 12 });
3804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003805 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3806 workloadFactory,
3807 memoryManager,
3808 shape0,
3809 input0,
3810 shape1,
3811 input1,
3812 shape0,
3813 output,
3814 1.0f,
3815 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003816}
3817
3818LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3819 armnn::IWorkloadFactory& workloadFactory,
3820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3821{
3822 const unsigned int shape0[] = { 1, 2, 2, 3 };
3823 const unsigned int shape1[] = { 1, 1, 1, 3 };
3824
3825 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3826 7, 8, 9, 10, 11, 12 });
3827
3828 std::vector<uint8_t> input1({ 1, 10, 3});
3829
3830 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3831 7, 10, 9, 10, 11, 12 });
3832
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003833 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3834 workloadFactory,
3835 memoryManager,
3836 shape0,
3837 input0,
3838 shape1,
3839 input1,
3840 shape0,
3841 output,
3842 1.0f,
3843 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003844}
3845
Sadik Armagan2999a022019-04-09 14:20:12 +01003846LayerTestResult<int16_t, 4> MaximumInt16Test(
3847 armnn::IWorkloadFactory& workloadFactory,
3848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3849{
3850 unsigned int shape[] = { 2, 2, 2, 2 };
3851
3852 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3853 3, 3, 3, 3, 4, 4, 4, 4 });
3854
3855 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3856 4, 4, 4, 4, 5, 5, 5, 5 });
3857
3858 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3859 4, 4, 4, 4, 5, 5, 5, 5 });
3860
3861 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3862 workloadFactory,
3863 memoryManager,
3864 shape,
3865 input0,
3866 shape,
3867 input1,
3868 shape,
3869 output,
3870 1.0f,
3871 0);
3872}
3873
3874LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3875 armnn::IWorkloadFactory& workloadFactory,
3876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3877{
3878 const unsigned int shape0[] = { 1, 2, 2, 3 };
3879 const unsigned int shape1[] = { 1, 1, 1, 1 };
3880
3881 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3882 7, 8, 9, 10, 11, 12 });
3883
3884 std::vector<int16_t> input1({2});
3885
3886 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3887 7, 8, 9, 10, 11, 12 });
3888
3889 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3890 workloadFactory,
3891 memoryManager,
3892 shape0,
3893 input0,
3894 shape1,
3895 input1,
3896 shape0,
3897 output,
3898 1.0f,
3899 0);
3900}
3901
3902LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3903 armnn::IWorkloadFactory& workloadFactory,
3904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3905{
3906 const unsigned int shape0[] = { 1, 2, 2, 3 };
3907 const unsigned int shape1[] = { 1, 1, 1, 3 };
3908
3909 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3910 7, 8, 9, 10, 11, 12 });
3911
3912 std::vector<int16_t> input1({ 1, 10, 3});
3913
3914 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3915 7, 10, 9, 10, 11, 12 });
3916
3917 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3918 workloadFactory,
3919 memoryManager,
3920 shape0,
3921 input0,
3922 shape1,
3923 input1,
3924 shape0,
3925 output,
3926 1.0f,
3927 0);
3928}
3929
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003930LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3931 armnn::IWorkloadFactory& workloadFactory,
3932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3933{
3934 unsigned int shape0[] = { 1, 2, 2, 2 };
3935 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3936
3937 unsigned int shape1[] = { 1, 1, 1, 1 };
3938 std::vector<float> input1({ 2 });
3939
3940 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3941
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003942 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3943 workloadFactory,
3944 memoryManager,
3945 shape0,
3946 input0,
3947 shape1,
3948 input1,
3949 shape0,
3950 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003951}
3952
3953
3954LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3955 armnn::IWorkloadFactory& workloadFactory,
3956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3957{
3958 unsigned int shape0[] = { 1, 2, 2, 2 };
3959 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3960
3961 unsigned int shape1[] = { 1, 1, 1, 1 };
3962 std::vector<float> input1({ 5 });
3963
3964 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3965
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003966 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3967 workloadFactory,
3968 memoryManager,
3969 shape0,
3970 input0,
3971 shape1,
3972 input1,
3973 shape0,
3974 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003975}
3976
3977LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3978 armnn::IWorkloadFactory & workloadFactory,
3979 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3980{
3981 const unsigned int shape0[] = { 1, 2, 2, 3 };
3982 const unsigned int shape1[] = { 1, 1, 1, 3 };
3983
3984 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3985 7, 1, 2, 3, 4, 5 });
3986
3987 std::vector<uint8_t> input1({ 1, 2, 3});
3988
3989 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3990 1, 1, 2, 1, 2, 3 });
3991
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003992 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3993 workloadFactory,
3994 memoryManager,
3995 shape0,
3996 input0,
3997 shape1,
3998 input1,
3999 shape0,
4000 output,
4001 1.0f,
4002 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004003}
4004
Sadik Armagan2999a022019-04-09 14:20:12 +01004005LayerTestResult<int16_t, 4> MinimumInt16Test(
4006 armnn::IWorkloadFactory& workloadFactory,
4007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4008{
4009 unsigned int shape[] = { 2, 2, 2, 2 };
4010
4011 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4012 3, 3, 3, 3, 4, 4, 4, 4 });
4013
4014 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4015 4, 4, 4, 4, 5, 5, 5, 5 });
4016
4017 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4018 3, 3, 3, 3, 4, 4, 4, 4 });
4019
4020 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4021 workloadFactory,
4022 memoryManager,
4023 shape,
4024 input0,
4025 shape,
4026 input1,
4027 shape,
4028 output,
4029 1.0f,
4030 0);
4031}
4032
4033LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4034 armnn::IWorkloadFactory& workloadFactory,
4035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4036{
4037 const unsigned int shape0[] = { 1, 2, 2, 3 };
4038 const unsigned int shape1[] = { 1, 1, 1, 1 };
4039
4040 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4041 7, 8, 9, 10, 11, 12 });
4042
4043 std::vector<int16_t> input1({2});
4044
4045 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4046 2, 2, 2, 2, 2, 2 });
4047
4048 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4049 workloadFactory,
4050 memoryManager,
4051 shape0,
4052 input0,
4053 shape1,
4054 input1,
4055 shape0,
4056 output,
4057 1.0f,
4058 0);
4059}
4060
4061LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4062 armnn::IWorkloadFactory& workloadFactory,
4063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4064{
4065 const unsigned int shape0[] = { 1, 2, 2, 3 };
4066 const unsigned int shape1[] = { 1, 1, 1, 3 };
4067
4068 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4069 7, 8, 9, 10, 11, 12 });
4070
4071 std::vector<int16_t> input1({ 1, 10, 3});
4072
4073 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4074 1, 8, 3, 1, 10, 3 });
4075
4076 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4077 workloadFactory,
4078 memoryManager,
4079 shape0,
4080 input0,
4081 shape1,
4082 input1,
4083 shape0,
4084 output,
4085 1.0f,
4086 0);
4087}
4088
namespace {
// Runs a single Multiplication workload on the given backend factory and
// returns both the actual output and the expected output for comparison.
// shape0/shape1 may differ from outShape when broadcasting is exercised.
// NOTE(review): memoryManager is currently unused by this helper; it is kept
// to match the signature convention of the other test helpers in this file.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const uint32_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the two inputs and the output into the workload descriptor.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
4138
4139
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004140LayerTestResult<float,4> MultiplicationTest(
4141 armnn::IWorkloadFactory& workloadFactory,
4142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004143{
4144 const unsigned int width = 2;
4145 const unsigned int height = 2;
4146 const unsigned int channelCount = 2;
4147 const unsigned int batchSize = 2;
4148
4149 unsigned int shape[] = { batchSize, channelCount, height, width };
4150
4151 std::vector<float> input0({
4152 1, 1, 1, 1, 2, 2, 2, 2,
4153 3, 3, 3, 3, 4, 4, 4, 4 });
4154
4155 std::vector<float> input1({
4156 2, 2, 2, 2, 3, 3, 3, 3,
4157 4, 4, 4, 4, 5, 5, 5, 5 });
4158
4159 std::vector<float> output({
4160 2, 2, 2, 2, 6, 6, 6, 6,
4161 12, 12, 12, 12, 20, 20, 20, 20 });
4162
4163 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004164 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004165 shape,
4166 input0,
4167 shape,
4168 input1,
4169 shape,
4170 output);
4171}
4172
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004173LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4174 armnn::IWorkloadFactory& workloadFactory,
4175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004176{
4177 unsigned int shape0[] = { 1, 2, 2, 2 };
4178 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4179
4180 unsigned int shape1[] = { 1, 1, 1, 1 };
4181 std::vector<float> input1({ 2 });
4182
4183 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4184
4185 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004186 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004187 shape0,
4188 input0,
4189 shape1,
4190 input1,
4191 shape0,
4192 output);
4193}
4194
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004195LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4196 armnn::IWorkloadFactory& workloadFactory,
4197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004198{
4199 unsigned int shape0[] = { 1, 3, 3, 2 };
4200 std::vector<float> input0({
4201 1, 2, 3, 4, 5, 6,
4202 7, 8, 9, 10, 11, 12,
4203 13, 14, 15, 16, 17, 18});
4204
4205 unsigned int shape1[] = { 1, 1, 1, 2 };
4206 std::vector<float> input1({ 1, 2 });
4207
4208 std::vector<float> output({
4209 1, 4, 3, 8, 5, 12,
4210 7, 16, 9, 20, 11, 24,
4211 13, 28, 15, 32, 17, 36});
4212
4213 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004214 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004215 shape0,
4216 input0,
4217 shape1,
4218 input1,
4219 shape0,
4220 output);
4221}
telsoa014fcda012018-03-09 14:13:49 +00004222
// Runs the same Multiplication workload on the backend under test and on the
// reference backend with identical random inputs, and returns both results so
// the caller can compare them. memoryManager is currently unused here.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the test deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor starts as a copy, then has its handles
    // rebound to the reference backend's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Allocate every handle before copying data into any of them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // output = backend under test; outputExpected = reference backend.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
4292
// Runs the same BatchNormalization workload on the backend under test and on
// the reference backend with identical random inputs/parameters, returning
// both outputs for comparison. memoryManager is currently unused here.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    // Input/output are NCHW; the per-channel parameters are 1-D of size 'channels'.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    // Fixed seeds keep the test deterministic; variance is clamped to be
    // non-negative (min 0.0f) as required by batch normalization.
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    // The four parameter tensors are held in CPU-side scoped handles that the
    // descriptor points at; they must outlive workload execution.
    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // Copy the descriptor, then rebind input/output to the reference handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // output = backend under test; outputExpected = reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
4375
// Permutes inputData according to 'mappings' by running a Permute workload on
// the given backend, writing the permuted elements into outputData.
// NOTE: inputTensorInfo is an in/out parameter — on return it is overwritten
// with the permuted tensor's info so callers can chain further operations.
// memoryManager is currently unused by this helper.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    // Handles must be allocated before copying data in; configure after allocation.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Hand the permuted tensor info back to the caller (in/out parameter).
    inputTensorInfo = outputTensorInfo;
}
4419
Jim Flynn825af452019-05-20 12:49:28 +01004420armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004421 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4422 unsigned int concatDim)
4423{
telsoa014fcda012018-03-09 14:13:49 +00004424 std::vector<armnn::TensorShape> shapes;
4425 shapes.reserve(inputTensorInfos.size());
4426 for (const armnn::TensorInfo& it: inputTensorInfos)
4427 {
4428 shapes.push_back(it.GetShape());
4429 }
surmeh013537c2c2018-05-18 16:31:43 +01004430
Jim Flynn825af452019-05-20 12:49:28 +01004431 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4432 shapes.end(),
4433 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004434}
4435
//
// Concatenation is only supported for the N and C dimensions for NCHW and for the innermost dimension.
// In the case of fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the third slowest-iterating one, or the innermost dimension.
//
4441
// Returns true when the requested concat dimension is not directly supported,
// i.e. the inputs must be permuted (and the result permuted back) before a
// supported concatenation can be performed. See the note above this function.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Permute when rank < 3, or for rank-3 tensors when the concat dimension is
    // neither at least the third slowest-iterating one nor the innermost one.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
4467
4468armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4469{
4470 unsigned int numDims = inputShape.GetNumDimensions();
4471 if (numDims >= 3)
4472 {
4473 // Nothing to do if the inputShape has at least 3 dimensions.
4474 return inputShape;
4475 }
4476
4477 std::vector<unsigned int> newDims(size_t(3), 1u);
4478 unsigned int expandedBy = 3 - numDims;
4479 for (unsigned int i=0; i<numDims; ++i)
4480 {
4481 newDims[expandedBy+i] = inputShape[i];
4482 }
4483 return armnn::TensorShape(3u, &newDims[0]);
4484}
4485
4486void Generate3dPermuteVectorForConcat(
4487 unsigned int numDimensions,
4488 unsigned int & concatDim,
4489 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4490{
4491 BOOST_ASSERT_MSG(numDimensions <= 3,
4492 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004493 unsigned int expandedBy = 3 - numDimensions;
4494 unsigned int expandedConcatAxis = concatDim + expandedBy;
4495
4496 if (expandedConcatAxis == 2)
4497 {
4498 concatDim = 0;
4499 armnn::PermutationVector forwardPermutation({1, 2, 0});
4500 armnn::PermutationVector reversePermutation({2, 0, 1});
4501 permutations = std::make_pair(forwardPermutation, reversePermutation);
4502 }
4503 else if (expandedConcatAxis == 1)
4504 {
4505 concatDim = 0;
4506 armnn::PermutationVector forwardPermutation({2, 0, 1});
4507 armnn::PermutationVector reversePermutation({1, 2, 0});
4508 permutations = std::make_pair(forwardPermutation, reversePermutation);
4509 }
4510 else
4511 {
4512 BOOST_ASSERT(expandedConcatAxis == 0);
4513 concatDim = 0;
4514 }
4515}
4516
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// In/out parameters: inputTensorInfos and inputData are replaced with the
// expanded/permuted versions, permuteVector receives the reverse permutation
// (to undo the layout change after execution), concatDim is remapped to the
// post-permute axis, and outputTensorInfo's shape is updated accordingly.
//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutation pair from its rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D and permute this input's data into the new layout.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape follows the same expansion and forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4585
4586
4587//
4588// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01004589// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004590// output.
4591//
4592template <typename T>
4593void PermuteOutputForConcat(
4594 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004596 const armnn::TensorInfo & tensorInfo,
4597 const armnn::PermutationVector & permuteVector,
4598 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4599 T * data)
4600{
4601 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4602 if (data == nullptr)
4603 {
4604 // Nullptr is an error in the test. By returning without doing the permutation
4605 // I expect the caller to fail the test. It still makes sense to report this as
4606 // an assert for Debug builds.
4607 return;
4608 }
4609
4610 armnn::TensorInfo resultTensorInfo = tensorInfo;
4611 std::vector<T> inputData(tensorInfo.GetNumElements());
4612 std::vector<T> outputData;
4613
4614 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4615
4616 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004617 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004618 permuteVector,
4619 resultTensorInfo,
4620 &inputData[0],
4621 outputData);
4622
4623 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4624}
4625
4626template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004627void Concatenate(
4628 armnn::IWorkloadFactory& workloadFactory,
4629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4630 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4631 std::initializer_list<T *> inputsOrig,
4632 const armnn::TensorInfo& outputTensorInfoOrig,
4633 T * output,
narpra015cdda352018-11-19 15:30:27 +00004634 unsigned int concatDim,
4635 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004636{
4637 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4638 if (output == nullptr)
4639 {
4640 // Nullptr is an error in the test. By returning without doing the permutation
4641 // I expect the caller to fail the test. It still makes sense to report this as
4642 // an assert for Debug builds.
4643 return;
4644 }
4645
telsoa01c577f2c2018-08-31 09:22:23 +01004646 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004647 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4648 std::vector<T *> inputs = inputsOrig;
4649 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4650
4651 armnn::PermutationVector permuteVector{0, 1, 2};
4652
telsoa01c577f2c2018-08-31 09:22:23 +01004653 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004654 std::vector<std::vector<T>> tmpInputDataStorage;
4655
4656 const size_t inputCount = inputTensorInfos.size();
4657
4658 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4659
4660 if (needPermuteForConcat)
4661 {
4662 //
4663 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004664 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004665 //
4666 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004667 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004668 inputTensorInfos,
4669 inputs,
4670 tmpInputDataStorage,
4671 permuteVector,
4672 concatDim,
4673 outputTensorInfo);
4674 }
4675
narpra015cdda352018-11-19 15:30:27 +00004676 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004677
4678 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4679 inputHandles.reserve(inputCount);
4680
narpra015cdda352018-11-19 15:30:27 +00004681 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4682
Jim Flynne242f2d2019-05-22 14:24:13 +01004683 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004684 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004685 queueDescriptor.m_Parameters = viewsDescriptor;
4686
4687 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004688 {
narpra015cdda352018-11-19 15:30:27 +00004689 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4690 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4691 {
4692 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4693 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4694 }
telsoa014fcda012018-03-09 14:13:49 +00004695
narpra015cdda352018-11-19 15:30:27 +00004696 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004697
narpra015cdda352018-11-19 15:30:27 +00004698 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4699 for (unsigned int i = 0; i < inputCount; ++i)
4700 {
4701 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4702 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4703 subTensorsSupported ?
4704 workloadFactory.CreateSubTensorHandle(*outputHandle,
4705 inputTensorInfo.GetShape(),
4706 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4707 workloadFactory.CreateTensorHandle(inputTensorInfo);
4708
4709 inputHandles.emplace_back(std::move(inputHandle));
4710 }
4711
telsoa014fcda012018-03-09 14:13:49 +00004712 }
narpra015cdda352018-11-19 15:30:27 +00004713 else
4714 {
4715 for (unsigned int i = 0; i < inputCount; ++i)
4716 {
4717 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4718 inputHandles.emplace_back(std::move(inputHandle));
4719 }
4720 }
telsoa014fcda012018-03-09 14:13:49 +00004721
4722 for (unsigned int i = 0; i < inputCount; ++i)
4723 {
surmeh013537c2c2018-05-18 16:31:43 +01004724 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004725 }
4726
4727 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4728
Jim Flynn4ed6c832019-05-20 11:02:46 +01004729 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004730
4731 for (auto& inputHandle : inputHandles)
4732 {
4733 inputHandle->Allocate();
4734 }
4735
4736 outputHandle->Allocate();
4737
4738 unsigned int nextInputId = 0;
4739 for (auto& inputHandle : inputHandles)
4740 {
surmeh013537c2c2018-05-18 16:31:43 +01004741 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4742 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004743 }
4744
Derek Lambertif30f7d32019-04-09 10:25:02 +01004745 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004746 workload->Execute();
4747
surmeh013537c2c2018-05-18 16:31:43 +01004748 if (needPermuteForConcat)
4749 {
4750 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004751 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004752 outputTensorInfo,
4753 permuteVector,
4754 std::move(outputHandle),
4755 output);
4756 }
4757 else
4758 {
4759 CopyDataFromITensorHandle(output, outputHandle.get());
4760 }
telsoa014fcda012018-03-09 14:13:49 +00004761}
4762
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004763template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764LayerTestResult<T, 1> Concatenation1dTestImpl(
4765 armnn::IWorkloadFactory& workloadFactory,
4766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4767 float qScale,
4768 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004769{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004770 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004771
4772 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4773 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4774 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4775
Jim Flynncbb66aa2019-05-15 13:03:54 +01004776 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004777
4778 LayerTestResult<T, 1> result(outputTensorInfo);
4779
4780 std::vector<T> output;
4781 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004782 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004783 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4784 { input0.data(), input1.data(), input2.data() },
4785 outputTensorInfo,
4786 output.data(),
4787 0,
4788 true);
telsoa014fcda012018-03-09 14:13:49 +00004789
4790 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4791 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4792 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4793 }));
4794
4795 return result;
4796}
4797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004798LayerTestResult<float, 1> Concatenation1dTest(
4799 armnn::IWorkloadFactory& workloadFactory,
4800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004801{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004802 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004803}
4804
// Shared helper for the 2D concatenation tests: concatenates three fixed
// { 2, 3 } input tensors along 'dimension'. Fills in result.output only;
// the caller supplies outputTensorInfo and sets outputExpected.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
4855
// Concatenates three { 2, 3 } tensors along dimension 0 -> { 6, 3 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected: input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4890
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004891LayerTestResult<float, 2> Concatenation2dDim0Test(
4892 armnn::IWorkloadFactory& workloadFactory,
4893 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004894{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004895 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004896}
4897
// Concatenates three { 2, 3 } tensors along dimension 1 -> { 2, 9 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    // Expected: each batch holds input0's, input1's and input2's row in order.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
4920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004921LayerTestResult<float, 2> Concatenation2dDim1Test(
4922 armnn::IWorkloadFactory& workloadFactory,
4923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004925 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004926}
4927
// Concatenates { 2, 3 }, { 3, 3 } and { 1, 3 } tensors along dimension 0
// -> { 6, 3 }, exercising inputs of different extents on the concat axis.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4998
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004999LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5000 armnn::IWorkloadFactory& workloadFactory,
5001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005002{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005003 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5004 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005005}
5006
// Concatenates { 2, 3 }, { 2, 5 } and { 2, 1 } tensors along dimension 1
// -> { 2, 9 }, exercising inputs of different extents on the concat axis.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
5065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005066LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5067 armnn::IWorkloadFactory& workloadFactory,
5068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005069{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005070 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5071 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005072}
5073
// Shared helper for the 3D concatenation tests: concatenates three fixed
// { 2, 3, 2 } input tensors along 'dimension'. Fills in result.output only;
// the caller supplies outputTensorInfo and sets outputExpected.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
5161
// Concatenates three { 2, 3, 2 } tensors along dimension 0 -> { 6, 3, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: input0's batches, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
5232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005233LayerTestResult<float, 3> Concatenation3dDim0Test(
5234 armnn::IWorkloadFactory& workloadFactory,
5235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005236{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005237 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005238}
5239
// Concatenates three { 2, 3, 2 } tensors along dimension 1 -> { 2, 9, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected: per batch, input0's channels then input1's then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
5310
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005311LayerTestResult<float, 3> Concatenation3dDim1Test(
5312 armnn::IWorkloadFactory& workloadFactory,
5313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005314{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005315 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005316}
5317
// Concatenates three { 2, 3, 2 } tensors along dimension 2 (width),
// producing a { 2, 3, 6 } output, and fills in the expected result values.
// useSubtensor selects whether the backend may view inputs as sub-tensors.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    // Inputs and workload execution are provided by Concatenation3dTestImpl; axis = 2.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
5353
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005354LayerTestResult<float, 3> Concatenation3dDim2Test(
5355 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5357 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005358{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005359 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5360 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005361}
5362
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005363template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005364LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5365 armnn::IWorkloadFactory& workloadFactory,
5366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5367 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005368 int32_t qOffset)
5369{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005370 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005371 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5372 // Batch 0, Channel 0
5373 1.0f, 2.0f,
5374
5375 // Batch 0, Channel 1
5376 3.0f, 4.0f,
5377
5378 // Batch 0, Channel 2
5379 5.0f, 6.0f,
5380
5381 // Batch 1, Channel 0
5382 19.0f, 20.0f,
5383
5384 // Batch 1, Channel 1
5385 21.0f, 22.0f,
5386
5387 // Batch 1, Channel 2
5388 23.0f, 24.0f
5389 }));
5390
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005391 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005392 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5393 // Batch 0, Channel 0
5394 7.0f, 8.0f,
5395
5396 // Batch 0, Channel 1
5397 9.0f, 10.0f,
5398
5399 // Batch 0, Channel 2
5400 11.0f, 12.0f,
5401 }));
5402
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005403 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005404 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5405 // Batch 0, Channel 0
5406 25.0f, 26.0f,
5407
5408 // Batch 0, Channel 1
5409 27.0f, 28.0f,
5410
5411 // Batch 0, Channel 2
5412 29.0f, 30.0f,
5413
5414 // Batch 1, Channel 0
5415 13.0f, 14.0f,
5416
5417 // Batch 1, Channel 1
5418 15.0f, 16.0f,
5419
5420 // Batch 1, Channel 2
5421 17.0f, 18.0f,
5422
5423 // Batch 2, Channel 0
5424 31.0f, 32.0f,
5425
5426 // Batch 2, Channel 1
5427 33.0f, 34.0f,
5428
5429 // Batch 2, Channel 2
5430 35.0f, 36.0f
5431 }));
5432
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005433 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005434 LayerTestResult<T, 3> result(outputTensorInfo);
5435
5436 std::vector<T> output;
5437 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005438 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005439 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5440 { input0.data(), input1.data(), input2.data() },
5441 outputTensorInfo,
5442 output.data(),
5443 0,
5444 true);
telsoa014fcda012018-03-09 14:13:49 +00005445
5446 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5447 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5448 // Batch 0, Channel 0
5449 1.0f, 2.0f,
5450
5451 // Batch 0, Channel 1
5452 3.0f, 4.0f,
5453
5454 // Batch 0, Channel 2
5455 5.0f, 6.0f,
5456
5457 // Batch 1, Channel 0
5458 19.0f, 20.0f,
5459
5460 // Batch 1, Channel 1
5461 21.0f, 22.0f,
5462
5463 // Batch 1, Channel 2
5464 23.0f, 24.0f,
5465
5466 // Batch 2, Channel 0
5467 7.0f, 8.0f,
5468
5469 // Batch 2, Channel 1
5470 9.0f, 10.0f,
5471
5472 // Batch 2, Channel 2
5473 11.0f, 12.0f,
5474
5475 // Batch 3, Channel 0
5476 25.0f, 26.0f,
5477
5478 // Batch 3, Channel 1
5479 27.0f, 28.0f,
5480
5481 // Batch 3, Channel 2
5482 29.0f, 30.0f,
5483
5484 // Batch 4, Channel 0
5485 13.0f, 14.0f,
5486
5487 // Batch 4, Channel 1
5488 15.0f, 16.0f,
5489
5490 // Batch 4, Channel 2
5491 17.0f, 18.0f,
5492
5493 // Batch 5, Channel 0
5494 31.0f, 32.0f,
5495
5496 // Batch 5, Channel 1
5497 33.0f, 34.0f,
5498
5499 // Batch 5, Channel 2
5500 35.0f, 36.0f
5501 }));
5502
5503 return result;
5504}
5505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005506LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5507 armnn::IWorkloadFactory& workloadFactory,
5508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005509{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005510 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5511 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005512}
5513
// Concatenates three 3-D inputs with differing channel counts ({ 2, 3, 2 },
// { 2, 4, 2 } and { 2, 1, 2 }) along dimension 1, producing a { 2, 8, 2 }
// output, then checks the result against the expected concatenation order.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concatenation workload: axis 1, sub-tensor usage allowed.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected output: per batch, the channels of input0, then input1, then input2.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5644
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005645LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5646 armnn::IWorkloadFactory& workloadFactory,
5647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005649 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5650 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005651}
5652
// Concatenates three 3-D inputs with differing widths ({ 2, 3, 2 }, { 2, 3, 1 }
// and { 2, 3, 3 }) along dimension 2, producing a { 2, 3, 6 } output, then
// checks the result against the expected concatenation order.
// useSubtensor selects whether the backend may view inputs as sub-tensors.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concatenation workload: axis 2.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Expected output: per row, the widths of input0, then input1, then input2.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5760
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005761LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5762 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5764 bool useSubtensor)
5765{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005766 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5767 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005768}
5769
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005770template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005771LayerTestResult<T, 4> Concatenation4dTestImpl(
5772 armnn::IWorkloadFactory& workloadFactory,
5773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5774 const armnn::TensorInfo& outputTensorInfo,
5775 unsigned int dimension,
5776 bool useSubtensor,
5777 float qScale,
5778 int32_t qOffset)
5779{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005780 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005781
5782 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5783 1.0f, 2.0f,
5784 3.0f, 4.0f,
5785 5.0f, 6.0f,
5786 7.0f, 8.0f,
5787 9.0f, 10.0f,
5788 11.0f, 12.0f
5789 }));
5790
5791 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5792 11.0f, 12.0f,
5793 13.0f, 14.0f,
5794 15.0f, 16.0f,
5795 17.0f, 18.0f,
5796 19.0f, 20.0f,
5797 21.0f, 22.0f
5798 }));
5799
5800 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5801 21.0f, 22.0f,
5802 23.0f, 24.0f,
5803 25.0f, 26.0f,
5804 27.0f, 28.0f,
5805 29.0f, 30.0f,
5806 31.0f, 32.0f
5807 }));
5808
5809 LayerTestResult<T, 4> result(outputTensorInfo);
5810
5811 std::vector<T> output;
5812 output.resize(outputTensorInfo.GetNumElements());
5813
5814 Concatenate<T>(workloadFactory,
5815 memoryManager,
5816 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5817 {input0.data(), input1.data(), input2.data()},
5818 outputTensorInfo,
5819 output.data(),
5820 dimension,
5821 useSubtensor);
5822
5823 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5824 return result;
5825}
5826
// Concatenates the three fixed { 1, 3, 2, 2 } inputs along dimension 0,
// giving a { 3, 3, 2, 2 } output, and fills in the expected result values.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected output: input0, input1, input2 laid out one after another.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5863
5864LayerTestResult<float, 4> Concatenation4dDim0Test(
5865 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005866 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005867{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005868 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005869}
5870
// Concatenates the three fixed { 1, 3, 2, 2 } inputs along dimension 1,
// giving a { 1, 9, 2, 2 } output, and fills in the expected result values.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Expected output: channels of input0, then input1, then input2.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5908
5909LayerTestResult<float, 4> Concatenation4dDim1Test(
5910 armnn::IWorkloadFactory& workloadFactory,
5911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5912{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005913 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005914}
5915
// Concatenates the three fixed { 1, 3, 2, 2 } inputs along dimension 2,
// giving a { 1, 3, 6, 2 } output, and fills in the expected result values.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    // Expected output: per channel, the rows of input0, input1, input2 interleaved.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5953
5954LayerTestResult<float, 4> Concatenation4dDim2Test(
5955 armnn::IWorkloadFactory& workloadFactory,
5956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5957{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005958 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005959}
5960
// Concatenates the three fixed { 1, 3, 2, 2 } inputs along dimension 3,
// giving a { 1, 3, 2, 6 } output, and fills in the expected result values.
// useSubtensor selects whether the backend may view inputs as sub-tensors.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    // Expected output: innermost pairs of input0, input1, input2 interleaved.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5999
6000LayerTestResult<float, 4> Concatenation4dDim3Test(
6001 armnn::IWorkloadFactory& workloadFactory,
6002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6003 bool useSubtensor)
6004{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006005 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6006 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00006007}
6008
// Concatenates two 4-D inputs with differing batch counts ({ 1, 3, 2, 2 } and
// { 2, 3, 2, 2 }) along dimension 0, producing a { 3, 3, 2, 2 } output, then
// checks the result against the expected concatenation order.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concatenation workload: axis 0, sub-tensor usage allowed.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
6088
6089LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6090 armnn::IWorkloadFactory& workloadFactory,
6091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6092{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006093 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6094 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006095}
6096
// Concatenates two 4-D inputs with differing channel counts ({ 1, 3, 2, 2 }
// and { 1, 2, 2, 2 }) along dimension 1, producing a { 1, 5, 2, 2 } output,
// then checks the result against the expected concatenation order.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concatenation workload: axis 1, sub-tensor usage allowed.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's three channels followed by input1's two.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
6157
6158LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6159 armnn::IWorkloadFactory& workloadFactory,
6160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6161{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006162 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6163 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006164}
6165
// Concatenates two 4-D inputs with differing heights ({ 1, 3, 2, 2 } and
// { 1, 3, 3, 2 }) along dimension 2, producing a { 1, 3, 5, 2 } output,
// then checks the result against the expected concatenation order.
// qScale/qOffset parameterise the quantization info for quantized data types.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Run the concatenation workload: axis 2, sub-tensor usage allowed.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: per channel, input0's two rows then input1's three rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
6237
6238LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6239 armnn::IWorkloadFactory& workloadFactory,
6240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6241{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006242 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6243 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006244}
6245
// Concatenates two 4d tensors of different extent along dimension 3 (the
// innermost axis): { 1, 3, 2, 2 } + { 1, 3, 2, 3 } -> { 1, 3, 2, 5 }.
// useSubtensor controls whether the Concatenate helper is asked to use
// sub-tensors for the operation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3; // axis to concatenate over
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output extent along dimension 3 is the sum of the inputs' (2 + 3 = 5).
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: each output row is input0's two elements followed by
    // input1's three elements.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
6306
6307LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6308 armnn::IWorkloadFactory& workloadFactory,
6309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6310 bool useSubtensor)
6311{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006312 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6313 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006314}
6315
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006316LayerTestResult<float, 2> FakeQuantizationTest(
6317 armnn::IWorkloadFactory& workloadFactory,
6318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006319{
6320 constexpr unsigned int width = 2;
6321 constexpr unsigned int height = 3;
6322
6323 const armnn::TensorInfo tensorInfo({height, width },
6324 armnn::DataType::Float32);
6325 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6326 -10.0f, -5.0f,
6327 0.0f, 5.0f,
6328 10.0f, 10.0f
6329 }));
6330
6331 LayerTestResult<float, 2> ret(tensorInfo);
6332
6333 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6334
6335 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6336
6337 armnn::FakeQuantizationQueueDescriptor data;
6338 armnn::WorkloadInfo info;
6339
6340 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6341 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6342 float min = -10.f;
6343 float max = 10.f;
6344
6345 data.m_Parameters.m_Min = min;
6346 data.m_Parameters.m_Max = max;
6347
6348 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6349 armnn::FakeQuantizationQueueDescriptor refData = data;
6350 armnn::WorkloadInfo refInfo = info;
6351 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6352
6353 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6354
6355 inputHandle->Allocate();
6356 outputHandle->Allocate();
6357
6358 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6359
Derek Lambertif30f7d32019-04-09 10:25:02 +01006360 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006361 workload->Execute();
6362
6363 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6364
6365 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6366 0.0f, 63.0f,
6367 128.0f, 191.0f,
6368 255.0f, 255.0f
6369 }));
6370 return ret;
6371}
6372
namespace
{

// Runs an L2Normalization workload and compares against caller-supplied
// expected values.
//
// inputValues / expectedOutputValues are always supplied in NCHW order; when
// layout is NHWC they are permuted into that layout before use (mapping per
// armnnUtils::Permute with the vector below). Input and output share the
// given shape but may carry different quantization (scale/offset vs
// outScale/outOffset). epsilon is forwarded to the descriptor's m_Eps.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // At this point, if required, permute the input data into NHWC order.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // Permute the expected output the same way so it matches the layout of
    // the actual output.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns the reciprocal of the Euclidean (L2) norm of the given elements:
// 1 / sqrt(sum of squares).
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
6456
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006457template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006458LayerTestResult<T, 2> Pad2dTestCommon(
6459 armnn::IWorkloadFactory& workloadFactory,
6460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6461 float qScale,
David Monahan34757812019-06-19 11:47:21 +01006462 int32_t qOffset,
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006463 const float customPaddingValue)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006464{
Derek Lambertif30f7d32019-04-09 10:25:02 +01006465 const armnn::TensorShape inputShape{ 3, 3 };
6466 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006467
David Monahan34757812019-06-19 11:47:21 +01006468 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6469 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006470
Derek Lambertif30f7d32019-04-09 10:25:02 +01006471 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006472 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006473 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006474 // Height (3) x Width (3)
6475 4, 8, 6,
6476 7, 4, 4,
6477 3, 2, 4
6478 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006479
Teresa Charlinec8e1982019-07-02 16:24:09 +01006480 auto p = customPaddingValue;
David Monahan34757812019-06-19 11:47:21 +01006481 std::vector<T> expectedOutputValues;
Teresa Charlinec8e1982019-07-02 16:24:09 +01006482 expectedOutputValues = (
6483 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006484 {
Teresa Charlinec8e1982019-07-02 16:24:09 +01006485 p, p, p, p, p, p, p,
6486 p, p, p, p, p, p, p,
6487 p, p, 4, 8, 6, p, p,
6488 p, p, 7, 4, 4, p, p,
6489 p, p, 3, 2, 4, p, p,
6490 p, p, p, p, p, p, p,
6491 p, p, p, p, p, p, p
6492 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006493
Derek Lambertif30f7d32019-04-09 10:25:02 +01006494 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006495
Derek Lambertif30f7d32019-04-09 10:25:02 +01006496 LayerTestResult<T, 2> result(outputTensorInfo);
6497 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006498
Derek Lambertif30f7d32019-04-09 10:25:02 +01006499 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6500 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006501
Derek Lambertif30f7d32019-04-09 10:25:02 +01006502 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006503
Teresa Charlinec8e1982019-07-02 16:24:09 +01006504 std::vector<std::pair<unsigned int, unsigned int>> padList;
6505 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6506 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006507
Teresa Charlinec8e1982019-07-02 16:24:09 +01006508 descriptor.m_Parameters.m_PadList = padList;
6509 descriptor.m_Parameters.m_PadValue = customPaddingValue;
Derek Lambertif30f7d32019-04-09 10:25:02 +01006510 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006511
Derek Lambertif30f7d32019-04-09 10:25:02 +01006512 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6513 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006514
Derek Lambertif30f7d32019-04-09 10:25:02 +01006515 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006516
Derek Lambertif30f7d32019-04-09 10:25:02 +01006517 inputHandle->Allocate();
6518 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006519
Derek Lambertif30f7d32019-04-09 10:25:02 +01006520 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006521
Derek Lambertif30f7d32019-04-09 10:25:02 +01006522 workload->PostAllocationConfigure();
6523 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006524
Derek Lambertif30f7d32019-04-09 10:25:02 +01006525 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006526
Derek Lambertif30f7d32019-04-09 10:25:02 +01006527 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006528}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006529
// Pads a { 2, 2, 2 } input to { 3, 5, 6 } with zeroes, using an asymmetric
// pad list: (0,1) on dim 0, (2,1) on dim 1, (2,2) on dim 2. No m_PadValue is
// set, so the descriptor's default pad value applies (the reference data
// below assumes zero).
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale, qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // Reference: each input channel gains 2 rows above / 1 below and
    // 2 columns either side; one fully-zero channel is appended (dim-0
    // padding (0,1)).
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale, qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (padBefore, padAfter) per dimension, outermost first.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006614
// Pads a { 2, 2, 3, 2 } input to { 4, 5, 7, 4 } with zeroes, using pad list
// (1,1), (2,1), (3,1), (1,1) — outermost dimension first. No m_PadValue is
// set, so the descriptor's default pad value applies (the reference data
// below assumes zero).
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale, qOffset,
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        0, 1,
        2, 3,
        4, 5,

        // Batch 0, Channel 1, Height (3) x Width (2)
        6, 7,
        8, 9,
        10, 11,

        // Batch 1, Channel 0, Height (3) x Width (2)
        12, 13,
        14, 15,
        16, 17,

        // Batch 1, Channel 1, Height (3) x Width (2)
        18, 19,
        20, 21,
        22, 23
    }));

    // Reference: 20 channel slices of 7x4. Output batches are
    // [pad, input batch 0, input batch 1, pad]; within each real batch the
    // channels are [pad, pad, channel 0, channel 1, pad]; each data slice
    // gains 3 rows above / 1 below and 1 column either side.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale, qOffset,
    {
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 0 of the input, padded
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
        0, 2, 3, 0,
        0, 4, 5, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 1 of the input, padded
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 6, 7, 0,
        0, 8, 9, 0,
        0, 10, 11, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 0 of the input, padded
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 12, 13, 0,
        0, 14, 15, 0,
        0, 16, 17, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 1 of the input, padded
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 18, 19, 0,
        0, 20, 21, 0,
        0, 22, 23, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0
    }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (padBefore, padAfter) per dimension, outermost first.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006853LayerTestResult<uint8_t, 2> PadUint82dTest(
6854 armnn::IWorkloadFactory& workloadFactory,
6855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006856{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006857 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006858}
6859
David Monahan34757812019-06-19 11:47:21 +01006860LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6861 armnn::IWorkloadFactory& workloadFactory,
6862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6863{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006864 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006865}
6866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006867LayerTestResult<uint8_t, 3> PadUint83dTest(
6868 armnn::IWorkloadFactory& workloadFactory,
6869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006870{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006871 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006872}
6873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006874LayerTestResult<uint8_t, 4> PadUint84dTest(
6875 armnn::IWorkloadFactory& workloadFactory,
6876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006877{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006878 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006879}
6880

// Explicit instantiations of the Pad*TestCommon templates for QSymm16, so
// their definitions are emitted in this translation unit for external
// callers — presumably declared in the corresponding test header (confirm).
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

6903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006904LayerTestResult<float, 2> PadFloat322dTest(
6905 armnn::IWorkloadFactory& workloadFactory,
6906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006907{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006908 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006909}
6910
David Monahan34757812019-06-19 11:47:21 +01006911LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6912 armnn::IWorkloadFactory& workloadFactory,
6913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6914{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006915 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006916}
6917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006918LayerTestResult<float, 3> PadFloat323dTest(
6919 armnn::IWorkloadFactory& workloadFactory,
6920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006921{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006922 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006923}
6924
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006925LayerTestResult<float, 4> PadFloat324dTest(
6926 armnn::IWorkloadFactory& workloadFactory,
6927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006928{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006929 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006930}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006931
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006932template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006933LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6934 armnn::IWorkloadFactory& workloadFactory,
6935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6936 float scale,
6937 int32_t offset,
6938 float outScale,
6939 int32_t outOffset,
6940 const armnn::DataLayout layout,
6941 float epsilon)
6942{
6943 // Width: 1
6944 // Height: 1
6945 // Channels: 3
6946 // BatchSize: 1
6947 unsigned int numberOfBatches = 1;
6948 unsigned int numberOfChannels = 3;
6949 unsigned int height = 1;
6950 unsigned int width = 1;
6951
6952 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6953 numberOfBatches, numberOfChannels, height, width, layout);
6954
6955 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6956 std::vector<float> inputValues
6957 {
6958 // Batch 0, Channel 0, Height (1) x Width (1)
6959 0.00000001f,
6960
6961 // Batch 0, Channel 1, Height (1) x Width (1)
6962 0.00000002f,
6963
6964 // Batch 0, Channel 2, Height (1) x Width (1)
6965 0.00000003f,
6966 };
6967
6968 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6969 std::vector<float> expectedOutputValues
6970 {
6971 // Batch 0, Channel 0, Height (1) x Width (1)
6972 0.00000001f * approxInvL2Norm,
6973 0.00000002f * approxInvL2Norm,
6974 0.00000003f * approxInvL2Norm,
6975 };
6976
6977 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6978 inputValues, outScale, outOffset, expectedOutputValues, layout,
6979 epsilon);
6980}
6981
6982
6983template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006984LayerTestResult<T, 4> L2Normalization1dTestCommon(
6985 armnn::IWorkloadFactory& workloadFactory,
6986 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006987 float scale,
6988 int32_t offset,
6989 float outScale,
6990 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006991 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006992{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006993 // Width: 1
6994 // Height: 1
6995 // Channels: 10
6996 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006997 unsigned int numberOfBatches = 1;
6998 unsigned int numberOfChannels = 10;
6999 unsigned int height = 1;
7000 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00007001
jimfly013aab7c32018-11-12 13:32:08 +00007002
Nina Drozdd41b2592018-11-19 13:03:36 +00007003 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007004 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007005 std::vector<float> inputValues
7006 {
7007 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007008 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00007009
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007010 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007011 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00007012
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007013 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007014 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00007015
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007016 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007017 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007018
7019 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007020 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007021
7022 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007023 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007024
7025 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007026 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007027
7028 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007029 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007030
7031 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007032 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007033
7034 // Batch 0, Channel 9, Height (1) x Width (1)
7035 10.0f
7036 };
telsoa014fcda012018-03-09 14:13:49 +00007037 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007038 std::vector<float> expectedOutputValues
7039 {
7040 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007041 1.0f * approxInvL2Norm,
7042 2.0f * approxInvL2Norm,
7043 3.0f * approxInvL2Norm,
7044 4.0f * approxInvL2Norm,
7045 5.0f * approxInvL2Norm,
7046 6.0f * approxInvL2Norm,
7047 7.0f * approxInvL2Norm,
7048 8.0f * approxInvL2Norm,
7049 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00007050 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007051 };
telsoa014fcda012018-03-09 14:13:49 +00007052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007053
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007054 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7055 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00007056}
7057
Ferran Balaguere52211e2019-06-17 12:23:52 +01007058LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7059 armnn::IWorkloadFactory& workloadFactory,
7060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7061 const armnn::DataLayout layout)
7062{
7063 // Dummy descriptor to get the default value of epsilon.
7064 armnn::L2NormalizationDescriptor descriptor;
7065
7066 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7067 layout, descriptor.m_Eps);
7068}
7069
7070LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7071 armnn::IWorkloadFactory& workloadFactory,
7072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7073 const armnn::DataLayout layout)
7074{
7075 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7076 layout, 1e-9f);
7077}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007078
7079LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007080 armnn::IWorkloadFactory& workloadFactory,
7081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007082 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007083{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007084 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007085}
7086
7087LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7088 armnn::IWorkloadFactory& workloadFactory,
7089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7090 const armnn::DataLayout layout)
7091{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007092 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007093 layout);
7094}
7095
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007096LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7097 armnn::IWorkloadFactory& workloadFactory,
7098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7099 const armnn::DataLayout layout)
7100{
7101 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7102 1.f/128, 128, layout);
7103}
7104
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007105template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7106LayerTestResult<T, 4> L2Normalization2dTestCommon(
7107 armnn::IWorkloadFactory& workloadFactory,
7108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007109 float scale,
7110 int32_t offset,
7111 float outScale,
7112 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007113 const armnn::DataLayout layout)
7114{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007115 // Width: 5
7116 // Height: 1
7117 // Channels: 2
7118 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007119 unsigned int numberOfBatches = 1;
7120 unsigned int numberOfChannels = 2;
7121 unsigned int height = 1;
7122 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00007123
Nina Drozdd41b2592018-11-19 13:03:36 +00007124 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007125 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007126 std::vector<float> inputValues
7127 {
7128 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00007129 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00007130
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007131 // Batch 0, Channel 1, Height (1) x Width (5)
7132 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7133 };
7134 std::vector<float> expectedOutputValues
7135 {
7136 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007137 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7138 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7139 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7140 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
7141 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007142
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007143 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007144 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7145 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7146 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7147 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007148 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007149 };
telsoa014fcda012018-03-09 14:13:49 +00007150
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007151 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7152 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007153}
telsoa014fcda012018-03-09 14:13:49 +00007154
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007155LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007156 armnn::IWorkloadFactory& workloadFactory,
7157 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007158 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007159{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007160 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7161 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007162}
7163
7164LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7165 armnn::IWorkloadFactory& workloadFactory,
7166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7167 const armnn::DataLayout layout)
7168{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007169 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007170 layout);
7171}
7172
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007173LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7174 armnn::IWorkloadFactory& workloadFactory,
7175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7176 const armnn::DataLayout layout)
7177{
7178 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7179 1.f/128, 128, layout);
7180}
7181
// Runs an L2 normalization workload directly (descriptor + tensor handles)
// over a rank-2 tensor of shape [5, 2], to check that the layer accepts
// non-4D input/output shapes. The expected data shows each consecutive pair
// {2k+1, 2k+2} scaled by the inverse L2 norm of that pair.
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    // Each pair of values shares the same inverse-norm scaling factor.
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    // Float32 with scale 0 / offset 0: QuantizedVector passes the data through.
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
                                            inputTensorInfo.GetQuantizationScale(),
                                            inputTensorInfo.GetQuantizationOffset(),
                                            inputData));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
                                                 outputTensorInfo.GetQuantizationScale(),
                                                 outputTensorInfo.GetQuantizationOffset(),
                                                 expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload descriptor by hand rather than through a helper.
    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    // Allocate handles before copying data in; order matters here.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7246
// L2 normalization over a "3D" case: one batch, 2 channels, 4x3 spatial size.
// Each element is scaled by the inverse L2 norm of its cross-channel pair
// (the values at the same spatial position in channel 0 and channel 1), as
// spelled out in expectedOutputValues. Quantisation parameters for input
// (scale/offset) and output (outScale/outOffset) are forwarded to
// L2NormalizationTestImpl; `layout` permutes the shape via GetTensorShape.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    // Every element divided by the L2 norm of its channel pair.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00007316
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007317LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007318 armnn::IWorkloadFactory& workloadFactory,
7319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007320 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007321{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007322 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7323 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007324}
7325
7326LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7327 armnn::IWorkloadFactory& workloadFactory,
7328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7329 const armnn::DataLayout layout)
7330{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007331 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007332 layout);
7333}
7334
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007335LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7336 armnn::IWorkloadFactory& workloadFactory,
7337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7338 const armnn::DataLayout layout)
7339{
7340 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7341 1.f/128, 128, layout);
7342}
7343
// L2 normalization over a "4D" case: 2 batches, 3 channels, 4x3 spatial size.
// Each element is scaled by the inverse L2 norm of its cross-channel triple
// (the three values at the same batch/spatial position), as spelled out in
// expectedOutputValues. Quantisation parameters for input (scale/offset) and
// output (outScale/outOffset) are forwarded to L2NormalizationTestImpl;
// `layout` permutes the shape via GetTensorShape.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    // Every element divided by the L2 norm over the 3 channels at its position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
7493
7494LayerTestResult<float, 4> L2Normalization4dTest(
7495 armnn::IWorkloadFactory& workloadFactory,
7496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7497 const armnn::DataLayout layout)
7498{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007499 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7500 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007501}
7502
7503LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7504 armnn::IWorkloadFactory& workloadFactory,
7505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7506 const armnn::DataLayout layout)
7507{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007508 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007509 layout);
telsoa014fcda012018-03-09 14:13:49 +00007510}
7511
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007512LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7513 armnn::IWorkloadFactory& workloadFactory,
7514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7515 const armnn::DataLayout layout)
7516{
7517 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7518 1.f/128, 128, layout);
7519}
7520
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007521template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007522LayerTestResult<T, 4> ConstantTestImpl(
7523 armnn::IWorkloadFactory& workloadFactory,
7524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007525 float qScale,
7526 int32_t qOffset)
7527{
7528 constexpr unsigned int inputWidth = 3;
7529 constexpr unsigned int inputHeight = 4;
7530 constexpr unsigned int inputChannels = 3;
7531 constexpr unsigned int inputBatchSize = 2;
7532
7533 constexpr unsigned int outputWidth = inputWidth;
7534 constexpr unsigned int outputHeight = inputHeight;
7535 constexpr unsigned int outputChannels = inputChannels;
7536 constexpr unsigned int outputBatchSize = inputBatchSize;
7537
Nina Drozd58ef2c62019-05-16 12:09:18 +01007538 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7539 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007540
Nina Drozd58ef2c62019-05-16 12:09:18 +01007541 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7542 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007543
7544 // Set quantization parameters if the requested type is a quantized type.
7545 if(armnn::IsQuantizedType<T>())
7546 {
7547 inputTensorInfo.SetQuantizationScale(qScale);
7548 inputTensorInfo.SetQuantizationOffset(qOffset);
7549 outputTensorInfo.SetQuantizationScale(qScale);
7550 outputTensorInfo.SetQuantizationOffset(qOffset);
7551 }
7552
7553 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7554 QuantizedVector<T>(qScale, qOffset, {
7555 // Batch 0, Channel 0
7556 235.0f, 46.0f, 178.0f,
7557 100.0f, 123.0f, 19.0f,
7558 172.0f, 74.0f, 250.0f,
7559 6.0f, 195.0f, 80.0f,
7560
7561 // Batch 0, Channel 1
7562 113.0f, 95.0f, 202.0f,
7563 77.0f, 114.0f, 71.0f,
7564 122.0f, 246.0f, 166.0f,
7565 82.0f, 28.0f, 37.0f,
7566
7567 // Batch 0, Channel 2
7568 56.0f, 170.0f, 162.0f,
7569 194.0f, 89.0f, 254.0f,
7570 12.0f, 209.0f, 200.0f,
7571 1.0f, 64.0f, 54.0f,
7572
7573 // Batch 1, Channel 0
7574 67.0f, 90.0f, 49.0f,
7575 7.0f, 163.0f, 18.0f,
7576 25.0f, 117.0f, 103.0f,
7577 247.0f, 59.0f, 189.0f,
7578
7579 // Batch 1, Channel 1
7580 239.0f, 104.0f, 199.0f,
7581 17.0f, 124.0f, 153.0f,
7582 222.0f, 217.0f, 75.0f,
7583 32.0f, 126.0f, 21.0f,
7584
7585 // Batch 1, Channel 2
7586 97.0f, 145.0f, 215.0f,
7587 115.0f, 116.0f, 238.0f,
7588 226.0f, 16.0f, 132.0f,
7589 92.0f, 125.0f, 88.0f,
7590 })));
7591
7592 LayerTestResult<T, 4> result(outputTensorInfo);
7593 result.outputExpected = input;
7594
7595 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7596
7597 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7598 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7599
7600 armnn::ConstantQueueDescriptor descriptor;
7601 descriptor.m_LayerOutput = &constantTensor;
7602
7603 armnn::WorkloadInfo info;
7604 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7605
7606 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7607
7608 outputHandle->Allocate();
7609
Derek Lambertif30f7d32019-04-09 10:25:02 +01007610 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007611 workload->Execute();
7612
7613 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7614 return result;
7615}
7616
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007617LayerTestResult<float, 4> ConstantTest(
7618 armnn::IWorkloadFactory& workloadFactory,
7619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007621 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007622}
7623
Nina Drozd58ef2c62019-05-16 12:09:18 +01007624LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7625 armnn::IWorkloadFactory& workloadFactory,
7626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7627{
7628 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7629}
7630
7631LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007632 armnn::IWorkloadFactory& workloadFactory,
7633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007634{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007635 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007636}
7637
Jim Flynn4ed6c832019-05-20 11:02:46 +01007638LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007639 armnn::IWorkloadFactory& workloadFactory,
7640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7641{
7642 unsigned int outputWidth = 3;
7643 unsigned int outputHeight = 6;
7644 unsigned int outputChannels = 3;
7645
7646 unsigned int inputWidth1 = 3;
7647 unsigned int inputHeight1 = 6;
7648 unsigned int inputChannels1 = 2;
7649
7650 unsigned int inputWidth2 = 3;
7651 unsigned int inputHeight2 = 6;
7652 unsigned int inputChannels2 = 1;
7653
7654 // Defines the tensor descriptors.
7655 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7656 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7657 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7658
7659 // Quantized input1 tensor. Range [-3, 1]
7660 const float inputScale1 = 0.015686f;
7661 const int32_t inputOffset1 = 192;
7662
7663 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7664 {
7665 1, 2, 3,
7666 4, 5, 6,
7667 7, 8, 9,
7668 10, 11, 12,
7669 13, 14, 15,
7670 16, 17, 18,
7671
7672 19, 20, 21,
7673 22, 23, 24,
7674 25, 26, 27,
7675 28, 29, 30,
7676 31, 32, 33,
7677 34, 35, 36,
7678 })
7679 );
7680
7681 // Quatized input2 tensor. Range [-1, 4]
7682 const float inputScale2 = 0.019608f;
7683 const int32_t inputOffset2 = 50;
7684
7685 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7686 {
7687 37, 38, 39,
7688 40, 41, 42,
7689 43, 44, 45,
7690 46, 47, 48,
7691 49, 50, 51,
7692 52, 53, 54,
7693 })
7694 );
7695
7696 // Output has the same quantization parameters than input1,
7697 // so that only the requantization of input2 is required
7698 const float outputScale = 0.015686f;
7699 const int32_t outputOffset = 192;
7700
7701 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7702
7703 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7704 {
7705 1, 2, 3,
7706 4, 5, 6,
7707 7, 8, 9,
7708 10, 11, 12,
7709 13, 14, 15,
7710 16, 17, 18,
7711
7712 19, 20, 21,
7713 22, 23, 24,
7714 25, 26, 27,
7715 28, 29, 30,
7716 31, 32, 33,
7717 34, 35, 36,
7718
7719 176, 177, 178,
7720 179, 181, 182,
7721 183, 184, 186,
7722 187, 188, 189,
7723 191, 192, 193,
7724 195, 196, 197,
7725 })
7726 );
7727
7728 outputTensorInfo.SetQuantizationScale(outputScale);
7729 outputTensorInfo.SetQuantizationOffset(outputOffset);
7730 inputTensorInfo1.SetQuantizationScale(inputScale1);
7731 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7732 inputTensorInfo2.SetQuantizationScale(inputScale2);
7733 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7734
7735 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007736 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007737
7738 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007739 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007740
7741 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7742
7743 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7744
7745 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7746 subTensorsSupported ?
7747 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7748 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7749
7750 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7751 subTensorsSupported ?
7752 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7753 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7754
Jim Flynne242f2d2019-05-22 14:24:13 +01007755 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007756 armnn::WorkloadInfo info;
7757 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7758 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7759 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7760
7761 data.m_ViewOrigins.push_back(window1);
7762 data.m_ViewOrigins.push_back(window2);
7763
Jim Flynn4ed6c832019-05-20 11:02:46 +01007764 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007765
7766 inputHandle1->Allocate();
7767 inputHandle2->Allocate();
7768 outputHandle->Allocate();
7769
7770 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7771 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7772
Derek Lambertif30f7d32019-04-09 10:25:02 +01007773 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007774 workload->Execute();
7775
7776 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7777
7778 return ret;
7779}
7780
Jim Flynn4ed6c832019-05-20 11:02:46 +01007781LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007782 armnn::IWorkloadFactory& workloadFactory,
7783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007784{
surmeh013537c2c2018-05-18 16:31:43 +01007785 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007786 unsigned int outputHeight = 6;
7787 unsigned int outputChannels = 3;
7788
surmeh013537c2c2018-05-18 16:31:43 +01007789 unsigned int inputWidth1 = 3;
7790 unsigned int inputHeight1 = 6;
7791 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007792
surmeh013537c2c2018-05-18 16:31:43 +01007793 unsigned int inputWidth2 = 3;
7794 unsigned int inputHeight2 = 6;
7795 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007796
telsoa01c577f2c2018-08-31 09:22:23 +01007797 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007798 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7799 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7800 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007801
Jim Flynn4ed6c832019-05-20 11:02:46 +01007802 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007803 const float scale = 0.13497836f;
7804 const int32_t offset = -7;
7805
7806 outputTensorInfo.SetQuantizationScale(scale);
7807 outputTensorInfo.SetQuantizationOffset(offset);
7808 inputTensorInfo1.SetQuantizationScale(scale);
7809 inputTensorInfo1.SetQuantizationOffset(offset);
7810 inputTensorInfo2.SetQuantizationScale(scale);
7811 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007812
7813 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7814
7815 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007816 {
7817 1, 2, 3,
7818 4, 5, 6,
7819 7, 8, 9,
7820 10, 11, 12,
7821 13, 14, 15,
7822 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007823
surmeh013537c2c2018-05-18 16:31:43 +01007824 19, 20, 21,
7825 22, 23, 24,
7826 25, 26, 27,
7827 28, 29, 30,
7828 31, 32, 33,
7829 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007830
surmeh013537c2c2018-05-18 16:31:43 +01007831 37, 38, 39,
7832 40, 41, 42,
7833 43, 44, 45,
7834 46, 47, 48,
7835 49, 50, 51,
7836 52, 53, 54,
7837 })
telsoa014fcda012018-03-09 14:13:49 +00007838 );
7839
telsoa014fcda012018-03-09 14:13:49 +00007840 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7841 {
surmeh013537c2c2018-05-18 16:31:43 +01007842 1, 2, 3,
7843 4, 5, 6,
7844 7, 8, 9,
7845 10, 11, 12,
7846 13, 14, 15,
7847 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007848
surmeh013537c2c2018-05-18 16:31:43 +01007849 19, 20, 21,
7850 22, 23, 24,
7851 25, 26, 27,
7852 28, 29, 30,
7853 31, 32, 33,
7854 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007855 })
7856 );
7857
7858 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7859 {
surmeh013537c2c2018-05-18 16:31:43 +01007860 37, 38, 39,
7861 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00007862 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01007863 46, 47, 48,
7864 49, 50, 51,
7865 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00007866 })
7867 );
7868
telsoa01c577f2c2018-08-31 09:22:23 +01007869 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007870 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00007871
telsoa01c577f2c2018-08-31 09:22:23 +01007872 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007873 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00007874
telsoa014fcda012018-03-09 14:13:49 +00007875
7876 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7877
7878 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7879
7880 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7881 subTensorsSupported ?
7882 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7883 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7884
7885 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7886 subTensorsSupported ?
7887 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7888 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7889
telsoa014fcda012018-03-09 14:13:49 +00007890
Jim Flynne242f2d2019-05-22 14:24:13 +01007891 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00007892 armnn::WorkloadInfo info;
7893 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7894 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00007895 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7896
7897 data.m_ViewOrigins.push_back(window1);
7898 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00007899
Jim Flynn4ed6c832019-05-20 11:02:46 +01007900 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00007901
7902 inputHandle1->Allocate();
7903 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007904 outputHandle->Allocate();
7905
7906 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7907 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007908
Derek Lambertif30f7d32019-04-09 10:25:02 +01007909 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007910 workload->Execute();
7911
7912 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7913
7914 return ret;
7915}
7916
Jim Flynn4ed6c832019-05-20 11:02:46 +01007917LayerTestResult<uint16_t, 3> ConcatUint16Test(
Jim Flynncbb66aa2019-05-15 13:03:54 +01007918 armnn::IWorkloadFactory& workloadFactory,
7919 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7920{
7921 unsigned int outputWidth = 3;
7922 unsigned int outputHeight = 6;
7923 unsigned int outputChannels = 3;
7924
7925 unsigned int inputWidth1 = 3;
7926 unsigned int inputHeight1 = 6;
7927 unsigned int inputChannels1 = 2;
7928
7929 unsigned int inputWidth2 = 3;
7930 unsigned int inputHeight2 = 6;
7931 unsigned int inputChannels2 = 1;
7932
7933 // Defines the tensor descriptors.
7934 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7935 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7936 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7937
Jim Flynn4ed6c832019-05-20 11:02:46 +01007938 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
Jim Flynncbb66aa2019-05-15 13:03:54 +01007939 const float scale = 0.13497836f;
7940 const int32_t offset = -7;
7941
7942 outputTensorInfo.SetQuantizationScale(scale);
7943 outputTensorInfo.SetQuantizationOffset(offset);
7944 inputTensorInfo1.SetQuantizationScale(scale);
7945 inputTensorInfo1.SetQuantizationOffset(offset);
7946 inputTensorInfo2.SetQuantizationScale(scale);
7947 inputTensorInfo2.SetQuantizationOffset(offset);
7948
7949 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7950
7951 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7952 {
7953 1, 2, 3,
7954 4, 5, 6,
7955 7, 8, 9,
7956 10, 11, 12,
7957 13, 14, 15,
7958 16, 17, 18,
7959
7960 19, 20, 21,
7961 22, 23, 24,
7962 25, 26, 27,
7963 28, 29, 30,
7964 31, 32, 33,
7965 34, 35, 36,
7966
7967 37, 38, 39,
7968 40, 41, 42,
7969 43, 44, 45,
7970 46, 47, 48,
7971 49, 50, 51,
7972 52, 53, 54,
7973 }));
7974
7975 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7976 {
7977 1, 2, 3,
7978 4, 5, 6,
7979 7, 8, 9,
7980 10, 11, 12,
7981 13, 14, 15,
7982 16, 17, 18,
7983
7984 19, 20, 21,
7985 22, 23, 24,
7986 25, 26, 27,
7987 28, 29, 30,
7988 31, 32, 33,
7989 34, 35, 36,
7990 }));
7991
7992 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
7993 {
7994 37, 38, 39,
7995 40, 41, 42,
7996 43, 44, 45,
7997 46, 47, 48,
7998 49, 50, 51,
7999 52, 53, 54,
8000 }));
8001
8002 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01008003 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008004
8005 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01008006 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008007
8008
8009 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8010
8011 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8012
8013 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8014 subTensorsSupported ?
8015 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8016 workloadFactory.CreateTensorHandle(inputTensorInfo1);
8017
8018 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8019 subTensorsSupported ?
8020 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8021 workloadFactory.CreateTensorHandle(inputTensorInfo2);
8022
8023
Jim Flynne242f2d2019-05-22 14:24:13 +01008024 armnn::ConcatQueueDescriptor data;
Jim Flynncbb66aa2019-05-15 13:03:54 +01008025 armnn::WorkloadInfo info;
8026 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8027 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8028 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8029
8030 data.m_ViewOrigins.push_back(window1);
8031 data.m_ViewOrigins.push_back(window2);
8032
Jim Flynn4ed6c832019-05-20 11:02:46 +01008033 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008034
8035 inputHandle1->Allocate();
8036 inputHandle2->Allocate();
8037 outputHandle->Allocate();
8038
8039 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8040 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8041
8042 workload->PostAllocationConfigure();
8043 workload->Execute();
8044
8045 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8046
8047 return ret;
8048}
telsoa014fcda012018-03-09 14:13:49 +00008049
surmeh01bceff2f2018-03-29 16:29:27 +01008050namespace
telsoa014fcda012018-03-09 14:13:49 +00008051{
Sadik Armagan2999a022019-04-09 14:20:12 +01008052template <typename T>
8053LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008054 armnn::IWorkloadFactory& workloadFactory,
8055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8056 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008057 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008058 float scale0,
8059 int32_t offset0,
8060 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008061 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008062 float scale1,
8063 int32_t offset1,
8064 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008065 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008066 float outScale,
8067 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01008068{
Sadik Armagan2999a022019-04-09 14:20:12 +01008069 auto dataType = (std::is_same<T, uint8_t>::value ?
8070 armnn::DataType::QuantisedAsymm8 :
8071 armnn::DataType::QuantisedSymm16);
8072
8073 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8074 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8075 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00008076
surmeh01bceff2f2018-03-29 16:29:27 +01008077 inputTensorInfo0.SetQuantizationScale(scale0);
8078 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00008079
surmeh01bceff2f2018-03-29 16:29:27 +01008080 inputTensorInfo1.SetQuantizationScale(scale1);
8081 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00008082
surmeh01bceff2f2018-03-29 16:29:27 +01008083 outputTensorInfo.SetQuantizationScale(outScale);
8084 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00008085
Sadik Armagan2999a022019-04-09 14:20:12 +01008086 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8087 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00008088
Sadik Armagan2999a022019-04-09 14:20:12 +01008089 LayerTestResult<T, 4> result(outputTensorInfo);
8090 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8091
8092 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8093 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8094 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8095
8096 armnn::AdditionQueueDescriptor data;
8097 armnn::WorkloadInfo info;
8098 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
8099 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8100 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8101
8102 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8103
8104 inputHandle0->Allocate();
8105 inputHandle1->Allocate();
8106 outputHandle->Allocate();
8107
8108 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8109 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8110
Derek Lambertif30f7d32019-04-09 10:25:02 +01008111 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01008112 workload->Execute();
8113
8114 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8115
8116 return result;
8117}
8118} // anonymous namespace
8119
8120LayerTestResult<uint8_t, 4> AdditionUint8Test(
8121 armnn::IWorkloadFactory& workloadFactory,
8122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8123{
8124 const unsigned int shape0[] = { 1, 2, 2, 3 };
8125 const unsigned int shape1[] = { 1, 2, 2, 3 };
8126
8127 std::vector<uint8_t> input0(
8128 {
8129 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
8130 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
8131 });
8132
8133 std::vector<uint8_t> input1(
8134 {
8135 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
8136 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
8137 });
8138
8139 std::vector<uint8_t> output(
8140 {
8141 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
8142 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8143 });
8144
8145 return AdditionQuantizeTestHelper(workloadFactory,
8146 memoryManager,
8147 shape0, input0, 7.0f, 3,
8148 shape1, input1, 7.0f, 3,
8149 shape0, output, 7.0f, 3);
8150}
8151
// Element-wise addition of two QuantisedSymm16 tensors that share the same
// quantization parameters (scale 7, offset 0). Inline comments show the
// dequantized values (value * 7); previous comments were stale copies from
// the uint8 test (offset 3) and have been corrected - nothing clamps here,
// as all sums are far below the int16 maximum.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
        {
            63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
            203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
        });

    std::vector<int16_t> input1(
        {
            21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
            126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
        });

    std::vector<int16_t> output(
        {
            84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
8183
namespace
{
// Runs a Multiplication workload on two quantized tensors and compares the
// raw quantized output against outValues. Unlike AdditionQuantizeTestHelper
// above, the ArmNN data type is passed explicitly as the ArmnnType template
// parameter, and input/output shapes may differ (e.g. for broadcasting).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8249
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008250LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8251 armnn::IWorkloadFactory& workloadFactory,
8252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008253{
8254 unsigned int batchSize = 1;
8255 unsigned int channels = 2;
8256 unsigned int height = 2;
8257 unsigned int width = 3;
8258 const unsigned int shape[] = { batchSize, channels, height, width };
8259
telsoa01c577f2c2018-08-31 09:22:23 +01008260 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008261 std::vector<uint8_t> input0({
8262 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
8263 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
8264 });
8265
telsoa01c577f2c2018-08-31 09:22:23 +01008266 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008267 std::vector<uint8_t> input1({
8268 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8269 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
8270 });
8271
telsoa01c577f2c2018-08-31 09:22:23 +01008272 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008273 std::vector<uint8_t> output(
8274 {
8275 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8276 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
8277 });
8278
Sadik Armagan2999a022019-04-09 14:20:12 +01008279 // Scale/offset chosen to have output values out of range.
8280 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8281 memoryManager,
8282 shape,
8283 input0,
8284 4.0f,
8285 1,
8286 shape,
8287 input1,
8288 3.0f,
8289 -2,
8290 shape,
8291 output,
8292 1366.255f,
8293 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01008294}
8295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008296LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8297 armnn::IWorkloadFactory& workloadFactory,
8298 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008299{
8300 const unsigned int shape0[] = { 1, 2, 2, 3 };
8301 const unsigned int shape1[] = { 1, 1, 1, 1 };
8302
8303 std::vector<uint8_t> input0({
8304 1, 2, 3, 4, 5, 6,
8305 7, 8, 9, 10, 11, 12
8306 });
8307
8308 std::vector<uint8_t> input1({2});
8309
8310 std::vector<uint8_t> output({
8311 2, 4, 6, 8, 10, 12,
8312 14, 16, 18, 20, 22, 24
8313 });
8314
Sadik Armagan2999a022019-04-09 14:20:12 +01008315 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8316 memoryManager,
8317 shape0,
8318 input0,
8319 1.0f,
8320 0,
8321 shape1,
8322 input1,
8323 1.0f,
8324 0,
8325 shape0,
8326 output,
8327 1.0f,
8328 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008329}
8330
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008331LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8332 armnn::IWorkloadFactory& workloadFactory,
8333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008334{
8335 const unsigned int shape0[] = { 1, 2, 2, 3 };
8336 const unsigned int shape1[] = { 1, 1, 1, 3 };
8337
8338 std::vector<uint8_t> input0({
8339 1, 2, 3, 4, 5, 6,
8340 7, 8, 9, 10, 11, 12
8341 });
8342
8343 std::vector<uint8_t> input1({1, 2, 3});
8344
8345 std::vector<uint8_t> output({
8346 1, 4, 9, 4, 10, 18,
8347 7, 16, 27, 10, 22, 36
8348 });
8349
Sadik Armagan2999a022019-04-09 14:20:12 +01008350 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8351 memoryManager,
8352 shape0,
8353 input0,
8354 1.0f,
8355 0,
8356 shape1,
8357 input1,
8358 1.0f,
8359 0,
8360 shape0,
8361 output,
8362 1.0f,
8363 0);
8364}
8365
8366LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8367 armnn::IWorkloadFactory& workloadFactory,
8368 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8369{
8370 const unsigned int shape[] = { 1, 2, 2, 3 };
8371
8372 std::vector<int16_t> input0(
8373 {
8374 6, 7, 8, 9, 10, 11,
8375 12, 13, 14, 15, 16, 17
8376 });
8377
8378 std::vector<int16_t> input1(
8379 {
8380 1, 2, 3, 4, 5, 6,
8381 7, 8, 9, 10, 11, 12
8382 });
8383
8384 std::vector<int16_t> output(
8385 {
8386 6, 14, 24, 36, 50, 66,
8387 84, 104, 126, 150, 176, 204
8388 });
8389
8390 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8391 memoryManager,
8392 shape,
8393 input0,
8394 1.0f,
8395 0,
8396 shape,
8397 input1,
8398 1.0f,
8399 0,
8400 shape,
8401 output,
8402 1.0f,
8403 0);
8404}
8405
8406LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8407 armnn::IWorkloadFactory& workloadFactory,
8408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8409{
8410 const unsigned int shape0[] = { 1, 2, 2, 3 };
8411 const unsigned int shape1[] = { 1, 1, 1, 1 };
8412
8413 std::vector<int16_t> input0(
8414 {
8415 1, 2, 3, 4, 5, 6,
8416 7, 8, 9, 10, 11, 12
8417 });
8418
8419 std::vector<int16_t> input1({2});
8420
8421 std::vector<int16_t> output(
8422 {
8423 2, 4, 6, 8, 10, 12,
8424 14, 16, 18, 20, 22, 24
8425 });
8426
8427 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8428 memoryManager,
8429 shape0,
8430 input0,
8431 1.0f,
8432 0,
8433 shape1,
8434 input1,
8435 1.0f,
8436 0,
8437 shape0,
8438 output,
8439 1.0f,
8440 0);
8441}
8442
8443LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8444 armnn::IWorkloadFactory& workloadFactory,
8445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8446{
8447 const unsigned int shape0[] = { 1, 2, 2, 3 };
8448 const unsigned int shape1[] = { 1, 1, 1, 3 };
8449
8450 std::vector<int16_t> input0(
8451 {
8452 1, 2, 3, 4, 5, 6,
8453 7, 8, 9, 10, 11, 12
8454 });
8455
8456 std::vector<int16_t> input1({1, 2, 3});
8457
8458 std::vector<int16_t> output(
8459 {
8460 1, 4, 9, 4, 10, 18,
8461 7, 16, 27, 10, 22, 36
8462 });
8463
8464 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8465 memoryManager,
8466 shape0,
8467 input0,
8468 1.0f,
8469 0,
8470 shape1,
8471 input1,
8472 1.0f,
8473 0,
8474 shape0,
8475 output,
8476 1.0f,
8477 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008478}
telsoa014fcda012018-03-09 14:13:49 +00008479
namespace
{
// Shared driver for the Subtraction layer tests below.
//
// Mirrors MultiplicationQuantizeTestHelper above: builds two 4-D input tensors
// and an expected-output tensor of the requested ArmnnType, attaches per-tensor
// quantization scale/offset, runs a Subtraction workload created by
// 'workloadFactory', and returns actual vs expected results.
//
// Parameters:
//   workloadFactory                        - backend factory used to create tensor handles and the workload.
//   memoryManager                          - accepted for signature parity with the other helpers; not
//                                            referenced in this function's body.
//   shape0 / values0 / scale0 / offset0    - minuend input: 4-D shape, raw values, quant params.
//   shape1 / values1 / scale1 / offset1    - subtrahend input (shape may differ, e.g. broadcast cases).
//   outShape / outValues / outScale /
//   outOffset                              - expected output tensor description.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Describe the two inputs and the output as 4-D tensors of the tested data type.
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Attach the caller-supplied quantization parameters to each tensor.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    // Package the raw value vectors as 4-D test tensors.
    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // 'result' holds both the to-be-filled actual output and the expected output.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    // Backend-specific handles for the workload's inputs and output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Register the I/O with the workload descriptor; input 0 (minuend) is
    // added before input 1 (subtrahend).
    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate backing memory and upload the input data before execution.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Post-allocation configuration runs after the handles are allocated and
    // populated, and before the workload executes.
    workload->PostAllocationConfigure();
    workload->Execute();

    // Read the computed output back into the result structure.
    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8545
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008546LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8547 armnn::IWorkloadFactory& workloadFactory,
8548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008549{
8550 const unsigned int shape0[] = { 1, 1, 2, 2 };
8551 const unsigned int shape1[] = { 1, 1, 2, 2 };
8552
8553 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8554 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8555 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8556
Sadik Armagan2999a022019-04-09 14:20:12 +01008557 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8558 memoryManager,
8559 shape0, input0, 0.5f, 2,
8560 shape1, input1, 1.0f, 0,
8561 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008562}
8563
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008564LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8565 armnn::IWorkloadFactory& workloadFactory,
8566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008567{
8568 const unsigned int shape0[] = { 1, 1, 2, 2 };
8569 const unsigned int shape1[] = { 1, 1, 1, 1 };
8570
8571 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8572 std::vector<uint8_t> input1({ 2 });
8573 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8574
Sadik Armagan2999a022019-04-09 14:20:12 +01008575 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8576 memoryManager,
8577 shape0, input0, 0.5f, 2,
8578 shape1, input1, 1.0f, 0,
8579 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008580}
8581
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008582LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8583 armnn::IWorkloadFactory& workloadFactory,
8584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008585{
8586 const unsigned int shape0[] = { 1, 1, 2, 2 };
8587 const unsigned int shape1[] = { 1, 1, 2, 1 };
8588
8589 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8590 std::vector<uint8_t> input1({ 2, 1 });
8591 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8592
Sadik Armagan2999a022019-04-09 14:20:12 +01008593 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8594 memoryManager,
8595 shape0, input0, 1.0f, 0,
8596 shape1, input1, 1.0f, 0,
8597 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008598}
8599
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008600LayerTestResult<float, 4> SubtractionTest(
8601 armnn::IWorkloadFactory& workloadFactory,
8602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008603{
8604 const unsigned int shape0[] = { 1, 1, 2, 2 };
8605 const unsigned int shape1[] = { 1, 1, 2, 2 };
8606
8607 std::vector<float> input0({ 1, 2, 3, 4 });
8608 std::vector<float> input1({ 1, -1, 0, 2 });
8609 std::vector<float> output({ 0, 3, 3, 2 });
8610
Sadik Armagan2999a022019-04-09 14:20:12 +01008611 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8612 memoryManager,
8613 shape0, input0, 1.0f, 0,
8614 shape1, input1, 1.0f, 0,
8615 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008616}
8617
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008618LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8619 armnn::IWorkloadFactory& workloadFactory,
8620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008621{
8622 const unsigned int shape0[] = { 1, 1, 2, 2 };
8623 const unsigned int shape1[] = { 1, 1, 1, 1 };
8624
8625 std::vector<float> input0({ 1, 2, 3, 4 });
8626 std::vector<float> input1({ 10 });
8627 std::vector<float> output({ -9, -8, -7, -6 });
8628
Sadik Armagan2999a022019-04-09 14:20:12 +01008629 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8630 memoryManager,
8631 shape0, input0, 1.0f, 0,
8632 shape1, input1, 1.0f, 0,
8633 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008634}
8635
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008636LayerTestResult<float, 4> SubtractionBroadcastTest(
8637 armnn::IWorkloadFactory& workloadFactory,
8638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008639{
8640 const unsigned int shape0[] = { 1, 1, 2, 2 };
8641 const unsigned int shape1[] = { 1, 1, 1, 2 };
8642
8643 std::vector<float> input0({ 1, 2, 3, 4 });
8644 std::vector<float> input1({ 10, -5 });
8645 std::vector<float> output({ -9, 7, -7, 9 });
8646
Sadik Armagan2999a022019-04-09 14:20:12 +01008647 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8648 memoryManager,
8649 shape0, input0, 1.0f, 0,
8650 shape1, input1, 1.0f, 0,
8651 shape0, output, 1.0f, 0);
8652}
8653
8654LayerTestResult<int16_t, 4> SubtractionInt16Test(
8655 armnn::IWorkloadFactory& workloadFactory,
8656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8657{
8658 const unsigned int shape0[] = { 1, 1, 2, 2 };
8659 const unsigned int shape1[] = { 1, 1, 2, 2 };
8660
8661 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8662 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8663 std::vector<int16_t> output({ 3, 3, 5, 5 });
8664
8665 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8666 memoryManager,
8667 shape0, input0, 0.5f, 0,
8668 shape1, input1, 1.0f, 0,
8669 shape0, output, 1.0f, 0);
8670}
8671
8672LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8673 armnn::IWorkloadFactory& workloadFactory,
8674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8675{
8676 const unsigned int shape0[] = { 1, 1, 2, 2 };
8677 const unsigned int shape1[] = { 1, 1, 1, 1 };
8678
8679 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8680 std::vector<int16_t> input1({ 2 });
8681 std::vector<int16_t> output({ 3, 4, 5, 6 });
8682
8683 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8684 memoryManager,
8685 shape0, input0, 0.5f, 0,
8686 shape1, input1, 1.0f, 0,
8687 shape0, output, 1.0f, 0);
8688}
8689
8690LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8691 armnn::IWorkloadFactory& workloadFactory,
8692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8693{
8694 const unsigned int shape0[] = { 1, 1, 2, 2 };
8695 const unsigned int shape1[] = { 1, 1, 2, 1 };
8696
8697 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8698 std::vector<int16_t> input1({ 2, 1 });
8699 std::vector<int16_t> output({ 8, 11, 12, 15 });
8700
8701 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8702 memoryManager,
8703 shape0, input0, 1.0f, 0,
8704 shape1, input1, 1.0f, 0,
8705 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008706}
8707
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008708LayerTestResult<float, 4> BatchNormTest(
8709 armnn::IWorkloadFactory& workloadFactory,
8710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008711{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008712 // BatchSize: 1
8713 // Channels: 2
8714 // Height: 3
8715 // Width: 2
8716
8717 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8718 std::vector<float> inputValues
8719 {
8720 // Batch 0, Channel 0, Height (3) x Width (2)
8721 1.f, 4.f,
8722 4.f, 2.f,
8723 1.f, 6.f,
8724
8725 // Batch 0, Channel 1, Height (3) x Width (2)
8726 1.f, 1.f,
8727 4.f, 1.f,
8728 -2.f, 4.f
8729 };
8730 std::vector<float> expectedOutputValues
8731 {
8732 // Batch 0, Channel 0, Height (3) x Width (2)
8733 1.f, 4.f,
8734 4.f, 2.f,
8735 1.f, 6.f,
8736
8737 // Batch 0, Channel 1, Height (3) x Width (2)
8738 3.f, 3.f,
8739 4.f, 3.f,
8740 2.f, 4.f
8741 };
8742
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008743 return BatchNormTestImpl<armnn::DataType::Float32>(
8744 workloadFactory, memoryManager,
8745 inputOutputShape, inputValues, expectedOutputValues,
8746 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008747}
8748
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008749LayerTestResult<float, 4> BatchNormNhwcTest(
8750 armnn::IWorkloadFactory& workloadFactory,
8751 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008752{
8753 // BatchSize: 1
8754 // Height: 3
8755 // Width: 2
8756 // Channels: 2
8757
8758 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8759 std::vector<float> inputValues
8760 {
8761 // Batch 0, Height 0, Width (2) x Channel (2)
8762 1.f, 1.f,
8763 4.f, 1.f,
8764
8765 // Batch 0, Height 1, Width (2) x Channel (2)
8766 4.f, 4.f,
8767 2.f, 1.f,
8768
8769 // Batch 0, Height 2, Width (2) x Channel (2)
8770 1.f, -2.f,
8771 6.f, 4.f
8772 };
8773 std::vector<float> expectedOutputValues
8774 {
8775 // Batch 0, Height 0, Width (2) x Channel (2)
8776 1.f, 3.f,
8777 4.f, 3.f,
8778
8779 // Batch 0, Height 1, Width (2) x Channel (2)
8780 4.f, 4.f,
8781 2.f, 3.f,
8782
8783 // Batch 0, Height 2, Width (2) x Channel (2)
8784 1.f, 2.f,
8785 6.f, 4.f
8786 };
8787
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008788 return BatchNormTestImpl<armnn::DataType::Float32>(
8789 workloadFactory, memoryManager,
8790 inputOutputShape, inputValues, expectedOutputValues,
8791 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008792}
8793
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008794LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8795 armnn::IWorkloadFactory& workloadFactory,
8796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008797{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008798 // BatchSize: 1
8799 // Channels: 2
8800 // Height: 3
8801 // Width: 2
8802
8803 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8804 std::vector<float> inputValues
8805 {
8806 // Batch 0, Channel 0, Height (3) x Width (2)
8807 1.f, 4.f,
8808 4.f, 2.f,
8809 1.f, 6.f,
8810
8811 // Batch 0, Channel 1, Height (3) x Width (2)
8812 1.f, 1.f,
8813 4.f, 1.f,
8814 -2.f, 4.f
8815 };
8816 std::vector<float> expectedOutputValues
8817 {
8818 // Batch 0, Channel 0, Height (3) x Width (2)
8819 1.f, 4.f,
8820 4.f, 2.f,
8821 1.f, 6.f,
8822
8823 // Batch 0, Channel 1, Height (3) x Width (2)
8824 3.f, 3.f,
8825 4.f, 3.f,
8826 2.f, 4.f
8827 };
8828
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008829 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8830 workloadFactory, memoryManager,
8831 inputOutputShape, inputValues, expectedOutputValues,
8832 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008833}
8834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008835LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8836 armnn::IWorkloadFactory& workloadFactory,
8837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008838{
8839 // BatchSize: 1
8840 // Height: 3
8841 // Width: 2
8842 // Channels: 2
8843
8844 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8845 std::vector<float> inputValues
8846 {
8847 // Batch 0, Height 0, Width (2) x Channel (2)
8848 1.f, 1.f,
8849 4.f, 1.f,
8850
8851 // Batch 0, Height 1, Width (2) x Channel (2)
8852 4.f, 4.f,
8853 2.f, 1.f,
8854
8855 // Batch 0, Height 2, Width (2) x Channel (2)
8856 1.f, -2.f,
8857 6.f, 4.f
8858 };
8859 std::vector<float> expectedOutputValues
8860 {
8861 // Batch 0, Height 0, Width (2) x Channel (2)
8862 1.f, 3.f,
8863 4.f, 3.f,
8864
8865 // Batch 0, Height 1, Width (2) x Channel (2)
8866 4.f, 4.f,
8867 2.f, 3.f,
8868
8869 // Batch 0, Height 2, Width (2) x Channel (2)
8870 1.f, 2.f,
8871 6.f, 4.f
8872 };
8873
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008874 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8875 (workloadFactory, memoryManager,
8876 inputOutputShape, inputValues, expectedOutputValues,
8877 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008878}
8879
Matteo Martincighf5507132019-06-04 10:59:47 +01008880LayerTestResult<int16_t, 4> BatchNormInt16Test(
8881 armnn::IWorkloadFactory& workloadFactory,
8882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8883{
8884 // BatchSize: 1
8885 // Channels: 2
8886 // Height: 3
8887 // Width: 2
8888
8889 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8890 std::vector<float> inputValues
8891 {
8892 // Batch 0, Channel 0, Height (3) x Width (2)
8893 1.f, 4.f,
8894 4.f, 2.f,
8895 1.f, 6.f,
8896
8897 // Batch 0, Channel 1, Height (3) x Width (2)
8898 1.f, 1.f,
8899 4.f, 1.f,
8900 -2.f, 4.f
8901 };
8902 std::vector<float> expectedOutputValues
8903 {
8904 // Batch 0, Channel 0, Height (3) x Width (2)
8905 1.f, 4.f,
8906 4.f, 2.f,
8907 1.f, 6.f,
8908
8909 // Batch 0, Channel 1, Height (3) x Width (2)
8910 3.f, 3.f,
8911 4.f, 3.f,
8912 2.f, 4.f
8913 };
8914
8915 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8916 workloadFactory, memoryManager,
8917 inputOutputShape, inputValues, expectedOutputValues,
8918 1.f/20.f, 50, armnn::DataLayout::NCHW);
8919}
8920
8921LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8922 armnn::IWorkloadFactory& workloadFactory,
8923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8924{
8925 // BatchSize: 1
8926 // Height: 3
8927 // Width: 2
8928 // Channels: 2
8929
8930 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8931 std::vector<float> inputValues
8932 {
8933 // Batch 0, Height 0, Width (2) x Channel (2)
8934 1.f, 1.f,
8935 4.f, 1.f,
8936
8937 // Batch 0, Height 1, Width (2) x Channel (2)
8938 4.f, 4.f,
8939 2.f, 1.f,
8940
8941 // Batch 0, Height 2, Width (2) x Channel (2)
8942 1.f, -2.f,
8943 6.f, 4.f
8944 };
8945 std::vector<float> expectedOutputValues
8946 {
8947 // Batch 0, Height 0, Width (2) x Channel (2)
8948 1.f, 3.f,
8949 4.f, 3.f,
8950
8951 // Batch 0, Height 1, Width (2) x Channel (2)
8952 4.f, 4.f,
8953 2.f, 3.f,
8954
8955 // Batch 0, Height 2, Width (2) x Channel (2)
8956 1.f, 2.f,
8957 6.f, 4.f
8958 };
8959
8960 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8961 (workloadFactory, memoryManager,
8962 inputOutputShape, inputValues, expectedOutputValues,
8963 1.f/20.f, 50, armnn::DataLayout::NHWC);
8964}
8965
Nina Drozd58ef2c62019-05-16 12:09:18 +01008966LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008967 armnn::IWorkloadFactory& workloadFactory,
8968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008969{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008970 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008971}
8972
Nina Drozd58ef2c62019-05-16 12:09:18 +01008973LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8974 armnn::IWorkloadFactory& workloadFactory,
8975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8976{
8977 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8978}
8979
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008980LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8981 armnn::IWorkloadFactory& workloadFactory,
8982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008984 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008985}
8986
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008987LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8988 armnn::IWorkloadFactory& workloadFactory,
8989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008990{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008991 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008992}
8993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008994LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8995 armnn::IWorkloadFactory& workloadFactory,
8996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008997{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008998 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008999}
9000
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009001LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
9002 armnn::IWorkloadFactory& workloadFactory,
9003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009004{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009005 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9006 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009007}
9008
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009009LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
9010 armnn::IWorkloadFactory& workloadFactory,
9011 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009012{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009013 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9014 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009015}
9016
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009017LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
9018 armnn::IWorkloadFactory& workloadFactory,
9019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009020{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009021 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009022}
9023
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009024LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
9025 armnn::IWorkloadFactory& workloadFactory,
9026 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009027{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009028 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009029}
9030
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009031LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
9032 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009033 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9034 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00009035{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009036 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9037 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009038}
9039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009040LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
9041 armnn::IWorkloadFactory& workloadFactory,
9042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009043{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009044 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009045}
9046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009047LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
9048 armnn::IWorkloadFactory& workloadFactory,
9049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009050{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009051 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9052 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009053}
9054
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009055LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
9056 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9058 bool useSubtensor)
9059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009060 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9061 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009062}
9063
9064LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9065 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009066 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009067{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009068 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009069}
9070
9071LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9072 armnn::IWorkloadFactory& workloadFactory,
9073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9074{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009075 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009076}
9077
9078LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9079 armnn::IWorkloadFactory& workloadFactory,
9080 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9081{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009082 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009083}
9084
9085LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9086 armnn::IWorkloadFactory& workloadFactory,
9087 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
9088{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009089 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9090 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00009091}
9092
9093LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9094 armnn::IWorkloadFactory& workloadFactory,
9095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9096{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009097 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9098 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009099}
9100
9101LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9102 armnn::IWorkloadFactory& workloadFactory,
9103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9104{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009105 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9106 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009107}
9108
9109LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9110 armnn::IWorkloadFactory& workloadFactory,
9111 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9112{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009113 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9114 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009115}
9116
9117LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9118 armnn::IWorkloadFactory& workloadFactory,
9119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9120 bool useSubtensor)
9121{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009122 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9123 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00009124}
9125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009126LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9127 armnn::IWorkloadFactory& workloadFactory,
9128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9129 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009130{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009131 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9132 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009133}
9134
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009135LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9136 armnn::IWorkloadFactory& workloadFactory,
9137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9138 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009139{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009140 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009141 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00009142}
9143
Teresa Charlin0434df62019-06-06 13:40:35 +01009144LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9145 armnn::IWorkloadFactory& workloadFactory,
9146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9147 bool forceNoPadding)
9148{
9149 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9150 workloadFactory, memoryManager, forceNoPadding);
9151}
9152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009153LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9154 armnn::IWorkloadFactory& workloadFactory,
9155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9156 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009157{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009158 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9159 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009160}
9161
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009162LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9163 armnn::IWorkloadFactory& workloadFactory,
9164 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9165 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009166{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009167 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009168 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009169}
9170
Teresa Charlin0434df62019-06-06 13:40:35 +01009171LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9172 armnn::IWorkloadFactory& workloadFactory,
9173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9174 bool forceNoPadding)
9175{
9176 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9177 workloadFactory, memoryManager, forceNoPadding);
9178}
9179
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009180LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9181 armnn::IWorkloadFactory& workloadFactory,
9182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009183 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009184{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009185 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009186}
9187
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009188LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9189 armnn::IWorkloadFactory& workloadFactory,
9190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009191 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01009192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009193 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01009194}
9195
Teresa Charlin0434df62019-06-06 13:40:35 +01009196LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9197 armnn::IWorkloadFactory& workloadFactory,
9198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9199 const armnn::DataLayout dataLayout)
9200{
9201 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9202}
9203LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9204 armnn::IWorkloadFactory& workloadFactory,
9205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9206{
9207 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9208}
9209
9210LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9211 armnn::IWorkloadFactory& workloadFactory,
9212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9213{
9214 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9215 workloadFactory, memoryManager, 1.0f, -5);
9216}
9217
9218LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9219 armnn::IWorkloadFactory& workloadFactory,
9220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9221{
9222 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9223 workloadFactory, memoryManager);
9224}
9225
9226LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9227 armnn::IWorkloadFactory& workloadFactory,
9228 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9229{
9230 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9231}
9232
9233LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9234 armnn::IWorkloadFactory& workloadFactory,
9235 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9236{
9237 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9238 workloadFactory, memoryManager, 1.0f, -5);
9239}
9240
9241LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9242 armnn::IWorkloadFactory& workloadFactory,
9243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9244{
9245 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9246 workloadFactory, memoryManager);
9247}
9248
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009249LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9250 armnn::IWorkloadFactory& workloadFactory,
9251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009252 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009253{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009254 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01009255}
9256
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009257LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9258 armnn::IWorkloadFactory& workloadFactory,
9259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009260 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01009261{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009262 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009263 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009264}
9265
Teresa Charlin0434df62019-06-06 13:40:35 +01009266LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9267 armnn::IWorkloadFactory& workloadFactory,
9268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9269 const armnn::DataLayout dataLayout)
9270{
9271 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9272 workloadFactory, memoryManager, dataLayout);
9273}
9274
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009275LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9276 armnn::IWorkloadFactory& workloadFactory,
9277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9278 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01009279{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009280 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009281 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01009282}
9283
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009284LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9285 armnn::IWorkloadFactory& workloadFactory,
9286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009287{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009288 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009289}
9290
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009291LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9292 armnn::IWorkloadFactory& workloadFactory,
9293 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009294{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009295 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9296 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009297}
9298
Teresa Charlin0434df62019-06-06 13:40:35 +01009299LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9300 armnn::IWorkloadFactory& workloadFactory,
9301 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9302{
9303 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9304 workloadFactory, memoryManager);
9305}
9306LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9307 armnn::IWorkloadFactory& workloadFactory,
9308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9309{
9310 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9311}
9312
9313LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9314 armnn::IWorkloadFactory& workloadFactory,
9315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9316{
9317 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9318 workloadFactory, memoryManager);
9319}
9320
9321LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9322 armnn::IWorkloadFactory& workloadFactory,
9323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9324{
9325 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9326 workloadFactory, memoryManager);
9327}
9328
9329LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9330 armnn::IWorkloadFactory& workloadFactory,
9331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9332{
9333 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9334 workloadFactory, memoryManager);
9335}
9336
9337LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9338 armnn::IWorkloadFactory& workloadFactory,
9339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9340{
9341 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9342 workloadFactory, memoryManager);
9343}
9344
9345LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9346 armnn::IWorkloadFactory& workloadFactory,
9347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9348{
9349 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9350 workloadFactory, memoryManager);
9351}
9352
9353LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9354 armnn::IWorkloadFactory& workloadFactory,
9355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9356{
9357 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9358}
9359
9360LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9361 armnn::IWorkloadFactory& workloadFactory,
9362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9363{
9364 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9365 workloadFactory, memoryManager);
9366}
9367
9368LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9369 armnn::IWorkloadFactory& workloadFactory,
9370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9371{
9372 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9373 workloadFactory, memoryManager);
9374}
9375
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009376LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9377 armnn::IWorkloadFactory& workloadFactory,
9378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009379 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009381 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009382}
9383
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009384LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9385 armnn::IWorkloadFactory& workloadFactory,
9386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009387 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009388{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009389 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009390}
9391
Teresa Charlin0434df62019-06-06 13:40:35 +01009392LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9393 armnn::IWorkloadFactory& workloadFactory,
9394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9395 const armnn::DataLayout dataLayout)
9396{
9397 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9398}
9399
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009400LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9401 armnn::IWorkloadFactory& workloadFactory,
9402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009403{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009404 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009405}
9406
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009407LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9408 armnn::IWorkloadFactory& workloadFactory,
9409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009411 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009412}
9413
Teresa Charlin0434df62019-06-06 13:40:35 +01009414LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9415 armnn::IWorkloadFactory& workloadFactory,
9416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9417{
9418 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9419}
9420
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009421LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9422 armnn::IWorkloadFactory& workloadFactory,
9423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009425 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009426}
9427
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009428LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9429 armnn::IWorkloadFactory& workloadFactory,
9430 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009431{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009432 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009433}
9434
Teresa Charlin0434df62019-06-06 13:40:35 +01009435LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9436 armnn::IWorkloadFactory& workloadFactory,
9437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9438{
9439 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9440}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009441LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9442 armnn::IWorkloadFactory& workloadFactory,
9443 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009444{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009445 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009446}
9447
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009448LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9449 armnn::IWorkloadFactory& workloadFactory,
9450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009451{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009452 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009453}
9454
Teresa Charlin0434df62019-06-06 13:40:35 +01009455LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9456 armnn::IWorkloadFactory& workloadFactory,
9457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9458{
9459 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9460}
9461
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009462LayerTestResult<float, 4> L2Pooling2dSize7Test(
9463 armnn::IWorkloadFactory& workloadFactory,
9464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009465{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009466 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009467}
9468
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009469LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9470 armnn::IWorkloadFactory& workloadFactory,
9471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009472{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009473 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009474}
9475
Teresa Charlin0434df62019-06-06 13:40:35 +01009476LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9477 armnn::IWorkloadFactory& workloadFactory,
9478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9479{
9480 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9481}
9482
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009483LayerTestResult<float, 4> L2Pooling2dSize9Test(
9484 armnn::IWorkloadFactory& workloadFactory,
9485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009486{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009487 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009488}
9489
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009490LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9491 armnn::IWorkloadFactory& workloadFactory,
9492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009493{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009494 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009495}
9496
Teresa Charlin0434df62019-06-06 13:40:35 +01009497LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9498 armnn::IWorkloadFactory& workloadFactory,
9499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9500{
9501 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9502}
9503LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9504 armnn::IWorkloadFactory& workloadFactory,
9505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9506{
9507 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9508}
9509
9510LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9511 armnn::IWorkloadFactory& workloadFactory,
9512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9513{
9514 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9515}
9516
9517LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9518 armnn::IWorkloadFactory& workloadFactory,
9519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9520{
9521 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9522}
9523
9524LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9525 armnn::IWorkloadFactory& workloadFactory,
9526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9527{
9528 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9529}
9530
9531LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9532 armnn::IWorkloadFactory& workloadFactory,
9533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9534{
9535 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9536}
9537
9538LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9539 armnn::IWorkloadFactory& workloadFactory,
9540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9541{
9542 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9543}
9544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009545LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9546 armnn::IWorkloadFactory& workloadFactory,
9547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009548{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009549 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009550}
9551
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009552LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9553 armnn::IWorkloadFactory& workloadFactory,
9554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009555{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009556 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009557}
9558
Teresa Charlin0434df62019-06-06 13:40:35 +01009559LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9560 armnn::IWorkloadFactory& workloadFactory,
9561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9562{
9563 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9564}
9565
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009566LayerTestResult<float, 4> ComparePooling2dTest(
9567 armnn::IWorkloadFactory& workloadFactory,
9568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9569 armnn::IWorkloadFactory& refWorkloadFactory,
9570 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009571{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009572 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009573 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009574}
9575
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009576LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9577 armnn::IWorkloadFactory& workloadFactory,
9578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9579 armnn::IWorkloadFactory& refWorkloadFactory,
9580 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009582 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009583 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009584}
9585
Teresa Charlin0434df62019-06-06 13:40:35 +01009586LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9587 armnn::IWorkloadFactory& workloadFactory,
9588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9589 armnn::IWorkloadFactory& refWorkloadFactory,
9590 armnn::PoolingAlgorithm poolingType)
9591{
9592 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9593 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9594}
9595
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009596LayerTestResult<float, 2> FullyConnectedLargeTest(
9597 armnn::IWorkloadFactory& workloadFactory,
9598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9599 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009600{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009601 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009602}
9603
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009604LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
9605 armnn::IWorkloadFactory& workloadFactory,
9606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009607{
9608 // Create Initial Tensor
9609 // 1, 2, 3
9610 // 4, 5, 6
9611 // 7, 8, 9
9612
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009613 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
9614 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009615
9616 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
9617 {1, 2, 3,
9618 4, 5, 6,
9619 7, 8, 9
9620 });
9621
9622 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
9623 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
9624 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
9625 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
9626
9627 // Apply MaxPool poolSize = 1x1, stride=2x2
9628 // Result =
9629 // 1, 3
9630 // 7, 9
9631 armnn::Pooling2dDescriptor descriptor;
9632 descriptor.m_PoolHeight = 1;
9633 descriptor.m_PoolWidth = 1;
9634 descriptor.m_StrideX = 2;
9635 descriptor.m_StrideY = 2;
9636 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
9637
9638 armnn::Pooling2dQueueDescriptor queueDescriptor;
9639 queueDescriptor.m_Parameters = descriptor;
9640 armnn::WorkloadInfo workloadInfo;
9641 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
9642 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
9643
9644 // Create the MaxPool
9645 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
9646
9647 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
9648 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
9649 boost::multi_array<float, 4> resultMaxPool;
9650 resultMaxPool.resize(shape);
9651
9652
9653 // Create addition with another tensor the same size
9654 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
9655 // with the initial tensor.
9656 // 12, 16
9657 // 24, 28
9658
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009659 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
9660 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009661
9662 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
9663 {12, 16,
9664 24, 28,
9665 });
9666
9667 // Expected output tensor after MaxPool and Addition.
9668 LayerTestResult<float,4> addRet(addOutputTensorInfo);
9669 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
9670 {
9671 13, 19,
9672 31, 37
9673 }));
9674
9675 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
9676 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
9677
9678 armnn::AdditionQueueDescriptor data;
9679 armnn::WorkloadInfo info;
9680
9681 // Add the output of the MaxPool and the new tensor
9682 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
9683 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
9684 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
9685
9686 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
9687
9688 poolingInputHandle->Allocate();
9689 poolingOutputHandle->Allocate();
9690 addInputHandle->Allocate();
9691 addOutputHandle->Allocate();
9692
9693 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
9694 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
9695
9696 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
9697 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
9698
Derek Lambertif30f7d32019-04-09 10:25:02 +01009699 workload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009700 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01009701 addWorkload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009702 addWorkload->Execute();
9703
9704 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
9705
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009706 return addRet;
9707}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009708
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009709LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9710 armnn::IWorkloadFactory& workloadFactory,
9711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009712{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009713 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009714}
9715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009716LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9717 armnn::IWorkloadFactory& workloadFactory,
9718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009719{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009720 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009721}
9722
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009723LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9724 armnn::IWorkloadFactory& workloadFactory,
9725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009726{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009727 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009728}
9729
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009730LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9731 armnn::IWorkloadFactory& workloadFactory,
9732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009733{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009734 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009735}
9736
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009737LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9738 armnn::IWorkloadFactory& workloadFactory,
9739 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009740{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009741 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009742}
9743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009744LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9745 armnn::IWorkloadFactory& workloadFactory,
9746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009747{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009748 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009749}
9750
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009751LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9752 armnn::IWorkloadFactory& workloadFactory,
9753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009755 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009756}
9757
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009758LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9759 armnn::IWorkloadFactory& workloadFactory,
9760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009761{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009762 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009763}
9764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009765LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9766 armnn::IWorkloadFactory& workloadFactory,
9767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009769 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009770}
9771
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009772LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9773 armnn::IWorkloadFactory& workloadFactory,
9774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009775{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009776 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009777}
9778
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009779LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9780 armnn::IWorkloadFactory& workloadFactory,
9781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009782{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009783 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009784}
9785
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009786LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9787 armnn::IWorkloadFactory& workloadFactory,
9788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009789{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009790 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009791}
9792
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009793LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9794 armnn::IWorkloadFactory& workloadFactory,
9795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009796{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009797 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009798}
9799
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009800LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9801 armnn::IWorkloadFactory& workloadFactory,
9802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009803{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009804 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009805}
9806
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009807LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9808 armnn::IWorkloadFactory& workloadFactory,
9809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009810{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009811 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009812}
9813
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009814LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9815 armnn::IWorkloadFactory& workloadFactory,
9816 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009817{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009818 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009819}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009820
nikraj01120522a2019-05-31 11:33:07 +01009821LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9822 armnn::IWorkloadFactory& workloadFactory,
9823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9824{
9825 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9826}
9827
9828LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9829 armnn::IWorkloadFactory& workloadFactory,
9830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9831{
9832 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9833}
9834
9835LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9836 armnn::IWorkloadFactory& workloadFactory,
9837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9838{
9839 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9840}
9841
9842LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9843 armnn::IWorkloadFactory& workloadFactory,
9844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9845{
9846 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9847}
9848
9849LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9850 armnn::IWorkloadFactory& workloadFactory,
9851 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9852{
9853 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9854}
9855
9856LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9857 armnn::IWorkloadFactory& workloadFactory,
9858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9859{
9860 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9861}
9862
9863LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9864 armnn::IWorkloadFactory& workloadFactory,
9865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9866{
9867 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9868}
9869
9870LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9871 armnn::IWorkloadFactory& workloadFactory,
9872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9873{
9874 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9875}
9876
Keith Davisa57eccb2019-06-14 17:33:22 +01009877LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9878 armnn::IWorkloadFactory& workloadFactory,
9879 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9880{
James Conroyd2aa85e2019-07-01 17:12:40 +01009881 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009882 workloadFactory,
9883 memoryManager);
9884}
9885
9886LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9887 armnn::IWorkloadFactory& workloadFactory,
9888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9889{
James Conroyd2aa85e2019-07-01 17:12:40 +01009890 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009891 workloadFactory,
9892 memoryManager,
9893 armnn::DataLayout::NCHW);
9894}
9895
James Conroyd2aa85e2019-07-01 17:12:40 +01009896LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009897 armnn::IWorkloadFactory& workloadFactory,
9898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9899{
James Conroyd2aa85e2019-07-01 17:12:40 +01009900 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009901 workloadFactory,
9902 memoryManager);
9903}
9904
James Conroyd2aa85e2019-07-01 17:12:40 +01009905LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009906 armnn::IWorkloadFactory& workloadFactory,
9907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9908{
James Conroyd2aa85e2019-07-01 17:12:40 +01009909 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
9910 workloadFactory,
9911 memoryManager,
9912 armnn::DataLayout::NCHW);
9913}
9914
9915LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
9916 armnn::IWorkloadFactory& workloadFactory,
9917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9918{
9919 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9920 workloadFactory,
9921 memoryManager);
9922}
9923
9924LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
9925 armnn::IWorkloadFactory& workloadFactory,
9926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9927{
9928 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9929 workloadFactory,
9930 memoryManager,
9931 armnn::DataLayout::NCHW);
9932}
9933
9934LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
9935 armnn::IWorkloadFactory& workloadFactory,
9936 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9937{
9938 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
9939 workloadFactory,
9940 memoryManager);
9941}
9942
9943LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
9944 armnn::IWorkloadFactory& workloadFactory,
9945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9946{
9947 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009948 workloadFactory,
9949 memoryManager,
9950 armnn::DataLayout::NCHW);
9951}
9952
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009953namespace {
9954
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009955} // anonymous namespace
9956
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009957LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9958 armnn::IWorkloadFactory& workloadFactory,
9959 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9960{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009961 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009962}
9963
9964LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9965 armnn::IWorkloadFactory& workloadFactory,
9966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9967{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009968 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009969}
9970
9971LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9972 armnn::IWorkloadFactory& workloadFactory,
9973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009975 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009976}
9977
9978LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9979 armnn::IWorkloadFactory& workloadFactory,
9980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9981{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009982 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009983}
9984
9985LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9986 armnn::IWorkloadFactory& workloadFactory,
9987 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9988{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009989 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009990}
9991
9992LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9993 armnn::IWorkloadFactory& workloadFactory,
9994 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9995{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009996 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009997}
9998
9999LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
10000 armnn::IWorkloadFactory& workloadFactory,
10001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10002{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010003 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010004}
10005
10006LayerTestResult<float, 2> StridedSlice2DFloat32Test(
10007 armnn::IWorkloadFactory& workloadFactory,
10008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010010 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010011}
10012
10013LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
10014 armnn::IWorkloadFactory& workloadFactory,
10015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10016{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010017 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010018}
10019
10020LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
10021 armnn::IWorkloadFactory& workloadFactory,
10022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10023{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010024 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010025}
10026
10027LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
10028 armnn::IWorkloadFactory& workloadFactory,
10029 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10030{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010031 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010032}
10033
10034LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
10035 armnn::IWorkloadFactory& workloadFactory,
10036 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10037{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010038 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010039}
10040
10041LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
10042 armnn::IWorkloadFactory& workloadFactory,
10043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010045 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010046}
10047
10048LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
10049 armnn::IWorkloadFactory& workloadFactory,
10050 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10051{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010052 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010053}
10054
10055LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
10056 armnn::IWorkloadFactory& workloadFactory,
10057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10058{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010059 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010060}
10061
10062LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
10063 armnn::IWorkloadFactory& workloadFactory,
10064 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10065{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010066 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010067}
10068
10069LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
10070 armnn::IWorkloadFactory& workloadFactory,
10071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10072{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010073 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010074}
10075
10076LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
10077 armnn::IWorkloadFactory& workloadFactory,
10078 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10079{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010080 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010081}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010082
Matteo Martincigh42666a12019-05-29 08:53:41 +010010083LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
10084 armnn::IWorkloadFactory& workloadFactory,
10085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10086{
10087 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10088}
10089
10090LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
10091 armnn::IWorkloadFactory& workloadFactory,
10092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10093{
10094 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10095}
10096
10097LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
10098 armnn::IWorkloadFactory& workloadFactory,
10099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10100{
10101 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10102}
10103
10104LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
10105 armnn::IWorkloadFactory& workloadFactory,
10106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10107{
10108 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10109}
10110
10111LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
10112 armnn::IWorkloadFactory& workloadFactory,
10113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10114{
10115 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10116}
10117
10118LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
10119 armnn::IWorkloadFactory& workloadFactory,
10120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10121{
10122 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10123}
10124
10125LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
10126 armnn::IWorkloadFactory& workloadFactory,
10127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10128{
10129 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10130}
10131
10132LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
10133 armnn::IWorkloadFactory& workloadFactory,
10134 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10135{
10136 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10137}
10138
10139LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
10140 armnn::IWorkloadFactory& workloadFactory,
10141 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10142{
10143 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10144}
10145
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010146LayerTestResult<float, 4> Debug4DFloat32Test(
10147 armnn::IWorkloadFactory& workloadFactory,
10148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10149{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010150 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010151}
10152
10153LayerTestResult<float, 3> Debug3DFloat32Test(
10154 armnn::IWorkloadFactory& workloadFactory,
10155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10156{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010157 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010158}
10159
10160LayerTestResult<float, 2> Debug2DFloat32Test(
10161 armnn::IWorkloadFactory& workloadFactory,
10162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10163{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010164 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010165}
10166
10167LayerTestResult<float, 1> Debug1DFloat32Test(
10168 armnn::IWorkloadFactory& workloadFactory,
10169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10170{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010171 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010172}
10173
10174LayerTestResult<uint8_t, 4> Debug4DUint8Test(
10175 armnn::IWorkloadFactory& workloadFactory,
10176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10177{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010178 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010179}
10180
10181LayerTestResult<uint8_t, 3> Debug3DUint8Test(
10182 armnn::IWorkloadFactory& workloadFactory,
10183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10184{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010185 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010186}
10187
10188LayerTestResult<uint8_t, 2> Debug2DUint8Test(
10189 armnn::IWorkloadFactory& workloadFactory,
10190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10191{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010192 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010193}
10194
10195LayerTestResult<uint8_t, 1> Debug1DUint8Test(
10196 armnn::IWorkloadFactory& workloadFactory,
10197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10198{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010199 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010200}
Matteo Martincigh49124022019-01-11 13:25:59 +000010201
narpra014951d842019-01-18 16:53:53 +000010202LayerTestResult<float, 1> Gather1DParamsFloatTest(
10203 armnn::IWorkloadFactory& workloadFactory,
10204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10205{
10206 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10207}
10208
10209LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
10210 armnn::IWorkloadFactory& workloadFactory,
10211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10212{
10213 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10214}
10215
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010216LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
10217 armnn::IWorkloadFactory& workloadFactory,
10218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10219{
10220 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10221}
10222
narpra014951d842019-01-18 16:53:53 +000010223LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
10224 armnn::IWorkloadFactory& workloadFactory,
10225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10226{
10227 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10228}
10229
10230LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
10231 armnn::IWorkloadFactory& workloadFactory,
10232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10233{
10234 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10235}
10236
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010237LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
10238 armnn::IWorkloadFactory& workloadFactory,
10239 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10240{
10241 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10242}
10243
narpra014951d842019-01-18 16:53:53 +000010244LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
10245 armnn::IWorkloadFactory& workloadFactory,
10246 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10247{
10248 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10249}
10250
10251LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
10252 armnn::IWorkloadFactory& workloadFactory,
10253 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10254{
10255 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
10256 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +000010257}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010258
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010259LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
10260 armnn::IWorkloadFactory& workloadFactory,
10261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10262{
10263 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
10264 workloadFactory, memoryManager);
10265}
10266
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010267LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010268 armnn::IWorkloadFactory& workloadFactory,
10269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10270{
10271 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10272}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010273
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010274LayerTestResult<float, 4> DequantizeOffsetUint8Test(
10275 armnn::IWorkloadFactory& workloadFactory,
10276 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10277{
10278 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10279}
10280
10281LayerTestResult<float, 4> DequantizeSimpleInt16Test(
10282 armnn::IWorkloadFactory& workloadFactory,
10283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10284{
10285 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10286}
10287
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010288LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10289 armnn::IWorkloadFactory& workloadFactory,
10290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10291{
10292 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10293}
10294
10295LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10296 armnn::IWorkloadFactory& workloadFactory,
10297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10298{
10299 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10300}
10301
10302LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10303 armnn::IWorkloadFactory& workloadFactory,
10304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10305{
10306 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10307}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010010308
10309//
10310// TransposeConvolution2d
10311//
10312
10313// Simple biased
10314LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
10315 armnn::IWorkloadFactory& workloadFactory,
10316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10317{
10318 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10319 workloadFactory,
10320 memoryManager,
10321 true,
10322 armnn::DataLayout::NCHW);
10323}
10324
10325LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
10326 armnn::IWorkloadFactory& workloadFactory,
10327 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10328{
10329 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10330 workloadFactory,
10331 memoryManager,
10332 true,
10333 armnn::DataLayout::NHWC);
10334}
10335
10336LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
10337 armnn::IWorkloadFactory& workloadFactory,
10338 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10339{
10340 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10341 workloadFactory,
10342 memoryManager,
10343 true,
10344 armnn::DataLayout::NCHW);
10345}
10346
10347LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
10348 armnn::IWorkloadFactory& workloadFactory,
10349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10350{
10351 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10352 workloadFactory,
10353 memoryManager,
10354 true,
10355 armnn::DataLayout::NHWC);
10356}
10357
10358LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
10359 armnn::IWorkloadFactory& workloadFactory,
10360 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10361{
10362 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10363 workloadFactory,
10364 memoryManager,
10365 true,
10366 armnn::DataLayout::NCHW);
10367}
10368
10369LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
10370 armnn::IWorkloadFactory& workloadFactory,
10371 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10372{
10373 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10374 workloadFactory,
10375 memoryManager,
10376 true,
10377 armnn::DataLayout::NHWC);
10378}
10379
10380// Simple unbiased
10381LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
10382 armnn::IWorkloadFactory& workloadFactory,
10383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10384{
10385 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10386 workloadFactory,
10387 memoryManager,
10388 false,
10389 armnn::DataLayout::NCHW);
10390}
10391
10392LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
10393 armnn::IWorkloadFactory& workloadFactory,
10394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10395{
10396 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10397 workloadFactory,
10398 memoryManager,
10399 false,
10400 armnn::DataLayout::NHWC);
10401}
10402
10403LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
10404 armnn::IWorkloadFactory& workloadFactory,
10405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10406{
10407 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10408 workloadFactory,
10409 memoryManager,
10410 false,
10411 armnn::DataLayout::NCHW);
10412}
10413
10414LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
10415 armnn::IWorkloadFactory& workloadFactory,
10416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10417{
10418 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10419 workloadFactory,
10420 memoryManager,
10421 false,
10422 armnn::DataLayout::NHWC);
10423}
10424
10425LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
10426 armnn::IWorkloadFactory& workloadFactory,
10427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10428{
10429 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10430 workloadFactory,
10431 memoryManager,
10432 false,
10433 armnn::DataLayout::NCHW);
10434}
10435
10436LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
10437 armnn::IWorkloadFactory& workloadFactory,
10438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10439{
10440 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10441 workloadFactory,
10442 memoryManager,
10443 false,
10444 armnn::DataLayout::NHWC);
10445}
10446
10447// Padded biased
10448LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
10449 armnn::IWorkloadFactory& workloadFactory,
10450 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10451{
10452 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10453 workloadFactory,
10454 memoryManager,
10455 true,
10456 armnn::DataLayout::NCHW);
10457}
10458
10459LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10460 armnn::IWorkloadFactory& workloadFactory,
10461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10462{
10463 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10464 workloadFactory,
10465 memoryManager,
10466 true,
10467 armnn::DataLayout::NHWC);
10468}
10469
10470LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10471 armnn::IWorkloadFactory& workloadFactory,
10472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10473{
10474 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10475 workloadFactory,
10476 memoryManager,
10477 true,
10478 armnn::DataLayout::NCHW);
10479}
10480
10481LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10482 armnn::IWorkloadFactory& workloadFactory,
10483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10484{
10485 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10486 workloadFactory,
10487 memoryManager,
10488 true,
10489 armnn::DataLayout::NHWC);
10490}
10491
10492LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10493 armnn::IWorkloadFactory& workloadFactory,
10494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10495{
10496 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10497 workloadFactory,
10498 memoryManager,
10499 true,
10500 armnn::DataLayout::NCHW);
10501}
10502
10503LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10504 armnn::IWorkloadFactory& workloadFactory,
10505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10506{
10507 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10508 workloadFactory,
10509 memoryManager,
10510 true,
10511 armnn::DataLayout::NHWC);
10512}
10513
10514// Padded unbiased
10515LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10516 armnn::IWorkloadFactory& workloadFactory,
10517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10518{
10519 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10520 workloadFactory,
10521 memoryManager,
10522 false,
10523 armnn::DataLayout::NCHW);
10524}
10525
10526LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10527 armnn::IWorkloadFactory& workloadFactory,
10528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10529{
10530 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10531 workloadFactory,
10532 memoryManager,
10533 false,
10534 armnn::DataLayout::NHWC);
10535}
10536
10537LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10538 armnn::IWorkloadFactory& workloadFactory,
10539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10540{
10541 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10542 workloadFactory,
10543 memoryManager,
10544 false,
10545 armnn::DataLayout::NCHW);
10546}
10547
10548LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10549 armnn::IWorkloadFactory& workloadFactory,
10550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10551{
10552 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10553 workloadFactory,
10554 memoryManager,
10555 false,
10556 armnn::DataLayout::NHWC);
10557}
10558
10559LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10560 armnn::IWorkloadFactory& workloadFactory,
10561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10562{
10563 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10564 workloadFactory,
10565 memoryManager,
10566 false,
10567 armnn::DataLayout::NCHW);
10568}
10569
10570LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10571 armnn::IWorkloadFactory& workloadFactory,
10572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10573{
10574 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10575 workloadFactory,
10576 memoryManager,
10577 false,
10578 armnn::DataLayout::NHWC);
10579}
10580
10581// Strided biased
10582LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10583 armnn::IWorkloadFactory& workloadFactory,
10584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10585{
10586 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10587 workloadFactory,
10588 memoryManager,
10589 true,
10590 armnn::DataLayout::NCHW);
10591}
10592
10593LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10594 armnn::IWorkloadFactory& workloadFactory,
10595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10596{
10597 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10598 workloadFactory,
10599 memoryManager,
10600 true,
10601 armnn::DataLayout::NHWC);
10602}
10603
10604LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10605 armnn::IWorkloadFactory& workloadFactory,
10606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10607{
10608 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10609 workloadFactory,
10610 memoryManager,
10611 true,
10612 armnn::DataLayout::NCHW);
10613}
10614
10615LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10616 armnn::IWorkloadFactory& workloadFactory,
10617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10618{
10619 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10620 workloadFactory,
10621 memoryManager,
10622 true,
10623 armnn::DataLayout::NHWC);
10624}
10625
10626LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10627 armnn::IWorkloadFactory& workloadFactory,
10628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10629{
10630 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10631 workloadFactory,
10632 memoryManager,
10633 true,
10634 armnn::DataLayout::NCHW);
10635}
10636
10637LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10638 armnn::IWorkloadFactory& workloadFactory,
10639 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10640{
10641 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10642 workloadFactory,
10643 memoryManager,
10644 true,
10645 armnn::DataLayout::NHWC);
10646}
10647
10648// Strided unbiased
10649LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10650 armnn::IWorkloadFactory& workloadFactory,
10651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10652{
10653 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10654 workloadFactory,
10655 memoryManager,
10656 false,
10657 armnn::DataLayout::NCHW);
10658}
10659
10660LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10661 armnn::IWorkloadFactory& workloadFactory,
10662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10663{
10664 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10665 workloadFactory,
10666 memoryManager,
10667 false,
10668 armnn::DataLayout::NHWC);
10669}
10670
10671LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10672 armnn::IWorkloadFactory& workloadFactory,
10673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10674{
10675 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10676 workloadFactory,
10677 memoryManager,
10678 false,
10679 armnn::DataLayout::NCHW);
10680}
10681
10682LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10683 armnn::IWorkloadFactory& workloadFactory,
10684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10685{
10686 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10687 workloadFactory,
10688 memoryManager,
10689 false,
10690 armnn::DataLayout::NHWC);
10691}
10692
10693LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10694 armnn::IWorkloadFactory& workloadFactory,
10695 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10696{
10697 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10698 workloadFactory,
10699 memoryManager,
10700 false,
10701 armnn::DataLayout::NCHW);
10702}
10703
10704LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10705 armnn::IWorkloadFactory& workloadFactory,
10706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10707{
10708 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10709 workloadFactory,
10710 memoryManager,
10711 false,
10712 armnn::DataLayout::NHWC);
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +010010713}