blob: f43121696961375d323a940d61b98bfd928db5bb [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Stored channel-major to match the NCHW TensorInfo {1, 3, 8, 16} used by the tests:
//   channel 0: rows of 0.5f with one all-zero row,
//   channel 1: a single vertical stripe of 1s in column 2,
//   channel 2: all -1s.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests.
// One value per output channel; GetBias2() quantizes these when bias is enabled.
static std::vector<float> Bias2({0, 2});
79
// Reference input/expected-output pair for a simple 3D softmax test
// (softmax computed over the 8-element axis of shape {1, 8, 1}).
struct Simple3dSoftmaxOutputData
{
    // Expected softmax of inputData: exp(x_i) / sum_j exp(x_j).
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    // Raw (pre-softmax) input values.
    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f,
    };
};
96
// Reference input/expected-output pair for a simple 4D softmax test
// (softmax computed over the 8-element axis of shape {1, 8, 1, 1}).
struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    // Expected softmax of inputData: exp(x_i) / sum_j exp(x_j).
    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    // Raw (pre-softmax) input values.
    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f
    };
};
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000111template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +0100112boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +0000113{
114 if(biasEnabled)
115 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +0100117 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +0000118 return bias;
119 }
120 else
121 {
122 return boost::multi_array<T, 1>();
123 }
124}
125
// Runs a Conv2d over the shared 3-channel 16x8 image with a 2-filter batch of
// 3x5 kernels and checks against a hand-computed expected output.
// qScale/qOffset quantize input, kernel and output; ArmnnBType is the bias data type.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Filter 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), // Bias scale = input scale * weight scale.
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
209
// Runs a Conv2d over the shared 3-channel 16x8 image with a 2-filter batch of
// 3x3 kernels and checks against a hand-computed expected output.
// qScale/qOffset quantize input, kernel and output; ArmnnBType is the bias data type.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Filter 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Filter 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Filter 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Filter 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Filter 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), // Bias scale = input scale * weight scale.
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
286
// Runs a small Conv2d in the given (NHWC-ordered) data layout, with no bias.
// NOTE(review): the biasEnabled parameter is accepted but not used — an empty
// bias tensor is always passed to the implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch 1-channel 3x4 input, described NHWC as {1, 3, 4, 1}.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });


    // A single 1-channel 3x3 kernel, NHWC {1, 3, 3, 1}.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                       4, 5, 6,
                                                                       0, 0, 0,
                                                                       3, 2, 1
                                                                   });

    // Expected output is 1 batch of a 1-channel 3x4 image (same spatial size as the input).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
        {
            23, 41, 33, 21,
            44, 65, 76, 52,
            82, 85, 79, 42
        };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias.
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
338
// Runs a 3x3 Conv2d with stride 2x2 and symmetric 1-pixel padding over a 5x5
// input, in the given (NHWC-ordered) data layout, with no bias.
// NOTE(review): the biasEnabled parameter is accepted but not used — an empty
// bias tensor is always passed to the implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
        {
            23, 33, 24,
            91, 99, 48,
            26, 50, 19
        };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric 1-pixel padding; stride 2 in both dimensions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias.
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
412 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000413}
414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000415LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
416 armnn::IWorkloadFactory& workloadFactory,
417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000419 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000421 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
422 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000423}
424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000425LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
426 armnn::IWorkloadFactory& workloadFactory,
427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000431 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
432 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
441 workloadFactory,
442 memoryManager,
443 0.f,
444 0,
445 biasEnabled,
446 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100447}
448
Mike Kelly7332ed82018-12-20 17:03:06 +0000449LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
450 armnn::IWorkloadFactory& workloadFactory,
451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
452 bool biasEnabled,
453 const armnn::DataLayout layout)
454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000455 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
456 workloadFactory,
457 memoryManager,
458 0.f,
459 0,
460 biasEnabled,
461 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000462}
463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000464LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
465 armnn::IWorkloadFactory& workloadFactory,
466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
467 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000468 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000469{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000470 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
471 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000472}
473
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100474LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
475 armnn::IWorkloadFactory& workloadFactory,
476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
477 bool biasEnabled,
478 const armnn::DataLayout layout)
479{
480return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
481 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
482}
483
484LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
485 armnn::IWorkloadFactory& workloadFactory,
486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
487 bool biasEnabled,
488 const armnn::DataLayout layout)
489{
490 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
491 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
492}
493
// Runs a Conv2d whose asymmetric padding (left 1, top 2, right 3, bottom 4)
// is larger than half the 2x2 kernel size on two sides; no bias is used.
// The expected output was computed by hand (see the worked table below).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 8x6 (rows x cols) image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale), // Bias disabled for this test.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
559 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000560LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
561 armnn::IWorkloadFactory& workloadFactory,
562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000563 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000564 float qScale,
565 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000566{
telsoa01c577f2c2018-08-31 09:22:23 +0100567 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000568 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000569 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
570 QuantizedVector<T>(qScale, qOffset, {
571 11,21,31,41,51,
572 12,22,32,42,52,
573 13,23,33,43,53,
574 14,24,34,44,54,
575 15,25,35,45,55,
576 })));
577
telsoa01c577f2c2018-08-31 09:22:23 +0100578 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000579 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000580 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
581 QuantizedVector<T>(qScale, qOffset, {
582 -11,-21,-31,-41,
583 -12,-22,-32,-42,
584 -13,-23,-33,-43,
585 -14,-24,-34,-44,
586 })));
587
telsoa01c577f2c2018-08-31 09:22:23 +0100588 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000589 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000590 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
591 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
592 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000593 -7140, -10580, -13940, -9300, -5230,
594 -9590, -14120, -18520, -12290, -6860,
595 -9980, -14560, -18960, -12560, -7000,
596 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100597 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000598 })));
599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000600 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
601 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000602 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000603 input,
604 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100605 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000606 expectedOutput,
607 qScale,
608 qOffset,
narpra015f703182018-10-26 16:24:58 +0100609 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100610 1, // Padding left.
611 1, // Padding top.
612 2, // Padding right.
613 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100614}
615
Teresa Charlinedeeb162019-06-14 11:09:19 +0100616LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
617 armnn::IWorkloadFactory& workloadFactory,
618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
619 armnn::DataLayout layout)
620{
621 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
622 workloadFactory, memoryManager, layout, 0.0f, 0);
623}
624
625LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
626 armnn::IWorkloadFactory& workloadFactory,
627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
628 armnn::DataLayout layout)
629{
630 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
631 <armnn::DataType::Float32, armnn::DataType::Float32>(
632 workloadFactory, memoryManager, layout, 0.0f, 0);
633}
634
635LayerTestResult<float, 4> Convolution1dTest(
636 armnn::IWorkloadFactory& workloadFactory,
637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
638 bool biasEnabled)
639{
640 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
641 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
642}
643
644LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
645 armnn::IWorkloadFactory& workloadFactory,
646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
647 bool biasEnabled)
648{
649 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
650 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
651}
652
653LayerTestResult<float,4> CompareConvolution2dTest(
654 armnn::IWorkloadFactory& workloadFactory,
655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
656 armnn::IWorkloadFactory& refWorkloadFactory)
657{
658 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
659 workloadFactory, memoryManager, refWorkloadFactory);
660}
661
// Shared driver for the dilated-convolution tests: quantizes the caller's
// float input/kernel/expected-output values according to ArmnnType, then runs
// SimpleConvolution2dTestImpl with the given dilation/padding/stride.
// The TensorInfo arguments are passed by reference because their quantization
// scale and offset are set here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    // Pick fixed quantization parameters per data type. ArmnnType is a
    // template parameter, so this switch resolves at instantiation time.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            // Float32: quantization disabled.
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantize the caller-supplied float values into tensors of type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), // Bias scale = input scale * weight scale.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
748
// Single-channel 3x3 convolution with dilation 3x3: a 3x3 block of ones in a
// 10x10 zero image, convolved with a 1..9 kernel, no padding, stride 1.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
804
// Two-input-channel 3x3 convolution with dilation 3x3. Both channels carry the
// same pattern; the single output channel sums over the input channels, so the
// expected values are exactly double those of the single-channel test above.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel, two input channels.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
875
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100876template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
877LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
878 armnn::IWorkloadFactory &workloadFactory,
879 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
880 bool biasEnabled,
881 const armnn::DataLayout layout)
882{
883 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
884 std::vector<float> inputNoQuantizedValues =
885 {
886 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
887 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
888 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
889 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
890 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
891 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
892 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
893 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
894 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
895 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
896 };
897
898 armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
899 std::vector<float> kernelNoQuantizedValues =
900 {
901 1, 2,
902 3, 4
903 };
904
905 // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
906 // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
Jan Eilers0bf6b232019-07-12 10:46:33 +0100907 // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100908 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
909 std::vector<float> outputExpectedNoQuantizedValues =
910 {
911 4, 7, 7, 3,
912 6, 10, 10, 4,
913 6, 10, 10, 4,
914 2, 3, 3, 1
915 };
916 uint32_t padLeft = 1;
917 uint32_t padTop = 1;
918 uint32_t padRight = 1;
919 uint32_t padBottom = 1;
920
921 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
922 workloadFactory,
923 memoryManager,
924 inputNoQuantizedValues,
925 inputTensorInfo,
926 kernelNoQuantizedValues,
927 kernelTensorInfo,
928 outputExpectedNoQuantizedValues,
929 outputTensorInfo,
930 2,
931 2,
932 layout,
933 padLeft,
934 padTop,
935 padRight,
936 padBottom,
937 3,
938 3,
939 biasEnabled
940 );
941}
942
// Explicit instantiations of the dilated Convolution2d tests for the data
// types exercised by the backend unit tests (Float32, QAsymm8, QSymm16).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);
1005
// Depthwise convolution with asymmetric padding (left/top = 1, right/bottom = 2),
// depth multiplier 1, on a 2-channel 5x5 input.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias scale is input scale * kernel scale, hence qScale * qScale.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1086
// Same depthwise convolution as DepthwiseConvolution2dAsymmetricTestCommon,
// executed with the NHWC data layout.
// NOTE(review): the tensor shapes/data here are written channel-major like the
// NCHW variant above; DepthwiseConvolution2dTestImpl presumably performs the
// NHWC permutation internally — confirm against the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias scale is input scale * kernel scale, hence qScale * qScale.
    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1164
// Depthwise convolution with dilation 3x3 in NHWC layout: a 3x3 block of ones
// centred in a 9x9 zero image, convolved with a 1..9 kernel, no padding,
// stride 1. Only the kernel centre (value 5) ever overlaps the ones block,
// which is why the expected output is all 5s.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    // Bias scale is input scale * kernel scale, hence qScale * qScale.
    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1238
Teresa Charlin20b1f882019-06-19 09:34:37 +01001239
// Shared helper for the dilated DepthwiseConvolution2d tests below.
//
// Mirrors Convolution2d3x3DilationTestCommon: quantizes the supplied float
// data according to ArmnnType, stamps matching quantization info onto the
// three TensorInfos (passed by reference), and runs
// DepthwiseConvolution2dTestImpl with zero padding and unit stride.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    bool biasEnabled = false)
{
    // Select quantization parameters appropriate for the data type under test.
    // ArmnnType is a template parameter, so this switch resolves at compile time.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            // Float32 tensors carry no real quantization (scale 0, offset 0).
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Propagate the chosen quantization info to all three tensor descriptions.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantize the reference float data into tensors of the type under test.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                    kernelTensorInfo.GetQuantizationOffset(),
                                                                    kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                    outputTensorInfo.GetQuantizationOffset(),
                                                                    outputExpectedNoQuantizedValues)));

    // These tests always use zero padding and unit stride; only dilation varies.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;

    // Bias scale is input scale * kernel scale, hence qScale * qScale.
    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1326
// Single-channel depthwise 3x3 convolution with dilation 3x3: same input,
// kernel and expected output as the regular Convolution2d variant above.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1382
// Two-channel depthwise 3x3 convolution with dilation 3x3. Unlike the regular
// convolution variant, depthwise keeps one output channel per input channel,
// so the output is {1, 2, 4, 4} and each channel repeats the single-channel
// expected values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        bool biasEnabled,
        const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1458
1459
// Explicit instantiations of the dilated DepthwiseConvolution2d tests for the
// data types exercised by the backend unit tests (Float32, QAsymm8, QSymm16).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
        armnn::IWorkloadFactory&,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
        bool,
        armnn::DataLayout);
1501
// Float32 depthwise convolution (unquantized: scale 0, offset 0); see
// DepthwiseConvolution2dTestImpl for the fixture data.
LayerTestResult<float, 4> DepthwiseConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
1511
// Float32 depthwise convolution in NHWC layout (unquantized: scale 0, offset 0).
LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
1520
// Float32 depthwise convolution with depth multiplier 1 (unquantized:
// scale 0, offset 0); see DepthwiseConvolution2dDepthMul1TestImpl.
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
1530
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001531LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534{
1535 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538 std::vector<float> kernelData;
1539 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540 for (unsigned int i = 0; i < 64; ++i)
1541 {
1542 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543 }
1544 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
1547 std::vector<float> expectedOutputData(64, 0.f);
1548 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552 workloadFactory,
1553 memoryManager,
1554 input,
1555 kernel,
1556 boost::multi_array<float, 1>(),
1557 expectedOutput,
1558 0.f,
1559 0,
1560 armnn::DataLayout::NCHW);
1561}
1562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001563LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001567 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001569 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001570 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001571}
1572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001573LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574 armnn::IWorkloadFactory& workloadFactory,
1575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001577 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001578{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001579 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001580 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001581}
1582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001583LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584 armnn::IWorkloadFactory& workloadFactory,
1585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001587 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001589 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001590 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001591}
1592
Bruno Goncalves22972f02019-04-26 21:03:24 -03001593LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594 armnn::IWorkloadFactory& workloadFactory,
1595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596{
1597 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001598 workloadFactory,
1599 memoryManager,
1600 0.f,
1601 0,
1602 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001603}
1604
Ruomei Yan88d44b82019-05-23 14:29:06 +01001605LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 bool biasEnabled,
1609 const armnn::DataLayout layout)
1610{
1611 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613}
1614
1615LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618 bool biasEnabled,
1619 const armnn::DataLayout layout)
1620{
1621 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623}
1624
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001625LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001629 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001631 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001633}
1634
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001635LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636 armnn::IWorkloadFactory& workloadFactory,
1637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638 armnn::IWorkloadFactory& refWorkloadFactory,
1639 const armnn::DataLayout layout)
1640{
1641 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642 workloadFactory, memoryManager, refWorkloadFactory, layout);
1643}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001644
1645LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646 armnn::IWorkloadFactory& workloadFactory,
1647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001648{
1649 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001651 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
1658 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001660 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001666{
1667 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001669 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001670}
1671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001672LayerTestResult<float,2> SimpleSoftmaxTest(
1673 armnn::IWorkloadFactory& workloadFactory,
1674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001677 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001678}
1679
Francis Murtagh07f21212019-07-23 09:50:50 +01001680LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681 armnn::IWorkloadFactory& workloadFactory,
1682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683 float beta,
1684 int axis)
1685{
1686 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687}
1688
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001689LayerTestResult<float,3> Simple3dSoftmaxTest(
1690 armnn::IWorkloadFactory& workloadFactory,
1691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692 float beta)
1693{
Francis Murtagh07f21212019-07-23 09:50:50 +01001694 Simple3dSoftmaxOutputData data;
1695 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696 data.inputShape, data.outputData, data.inputData);
1697}
1698
1699LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
1700 armnn::IWorkloadFactory& workloadFactory,
1701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1702 float beta,
1703 int axis)
1704{
1705 armnn::TensorShape inputShape;
1706 std::vector<float> inputData;
1707 std::vector<float> outputData;
1708 switch (axis)
1709 {
1710 case -3:
1711 case 0:
1712 {
1713 inputShape = {5, 2, 2};
1714
1715 inputData =
1716 {
1717 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1718
1719 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1720 };
1721
1722 outputData =
1723 {
1724 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1725 0.236882800924671f,
1726 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1727 0.087144312427294f,
1728
1729 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1730 0.032058600957022f,
1731 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1732 7.246299848982885e-08f
1733 };
1734 break;
1735 }
1736 case -2:
1737 case 1:
1738 {
1739 inputShape = {2, 5, 2};
1740
1741 inputData =
1742 {
1743 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1744
1745 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1746 };
1747
1748 outputData =
1749 {
1750 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1751 0.087144312427294f,
1752 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1753 7.246299848982885e-08f,
1754
1755 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1756 0.087144312427294f,
1757 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1758 7.246299848982885e-08f
1759 };
1760 break;
1761 }
1762 case -1:
1763 case 2:
1764 {
1765 inputShape = {2, 2, 5};
1766
1767 inputData =
1768 {
1769 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1770 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1771 };
1772
1773 outputData =
1774 {
1775 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1776 7.246299848982885e-08f,
1777 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1778 7.246299848982885e-08f,
1779
1780 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1781 7.246299848982885e-08f,
1782 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1783 7.246299848982885e-08f
1784 };
1785 break;
1786 }
1787 }
1788
1789 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1790 inputShape, outputData, inputData, axis);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001791}
1792
1793LayerTestResult<float,4> Simple4dSoftmaxTest(
1794 armnn::IWorkloadFactory& workloadFactory,
1795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796 float beta)
1797{
Francis Murtagh07f21212019-07-23 09:50:50 +01001798 Simple4dSoftmaxData data;
1799 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800 data.outputData, data.inputData);
1801}
1802
1803LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
1804 armnn::IWorkloadFactory& workloadFactory,
1805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1806 float beta,
1807 int axis)
1808{
1809 armnn::TensorShape inputShape;
1810 std::vector<float> inputData;
1811 std::vector<float> outputData;
1812 switch (axis)
1813 {
1814 case -4:
1815 case 0:
1816 {
1817 inputShape = {5, 2, 2, 2};
1818
1819 inputData =
1820 {
1821 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
1822 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
1823 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
1824 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
1825 };
1826
1827 outputData =
1828 {
1829 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1830 0.643914213228014f,
1831 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
1832 0.236882800924671f,
1833 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
1834 0.236882800924671f,
1835 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1836 0.087144312427294f,
1837
1838 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
1839 0.032058600957022f,
1840 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
1841 0.032058600957022f,
1842 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1843 7.246299848982885e-08f,
1844 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1845 7.246299848982885e-08f, 7.246299848982885e-08f
1846 };
1847 break;
1848 }
1849 case -3:
1850 case 1:
1851 {
1852 inputShape = {2, 5, 2, 2};
1853
1854 inputData =
1855 {
1856 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1857 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
1858 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
1859 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
1860 };
1861
1862 outputData =
1863 {
1864 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1865 0.236882800924671f,
1866 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1867 0.087144312427294f,
1868 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1869 0.032058600957022f,
1870 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1871 7.246299848982885e-08f,
1872
1873
1874 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
1875 0.236882800924671f,
1876 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
1877 0.087144312427294f,
1878 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
1879 0.032058600957022f,
1880 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
1881 7.246299848982885e-08f
1882 };
1883 break;
1884 }
1885 case -2:
1886 case 2:
1887 {
1888 inputShape = {2, 2, 5, 2};
1889
1890 inputData =
1891 {
1892 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1893 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1894 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
1895 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
1896 };
1897
1898 outputData =
1899 {
1900 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1901 0.087144312427294f,
1902 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1903 7.246299848982885e-08f,
1904 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1905 0.087144312427294f,
1906 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1907 7.246299848982885e-08f,
1908
1909 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1910 0.087144312427294f,
1911 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1912 7.246299848982885e-08f,
1913 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
1914 0.087144312427294f,
1915 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
1916 7.246299848982885e-08f
1917 };
1918 break;
1919 }
1920 case -1:
1921 case 3:
1922 {
1923 inputShape = {2, 2, 2, 5};
1924
1925 inputData =
1926 {
1927 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1928 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1929 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
1930 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
1931 };
1932
1933 outputData =
1934 {
1935 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1936 7.246299848982885e-08f,
1937 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1938 7.246299848982885e-08f,
1939 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1940 7.246299848982885e-08f,
1941 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1942 7.246299848982885e-08f,
1943
1944 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1945 7.246299848982885e-08f,
1946 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1947 7.246299848982885e-08f,
1948 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1949 7.246299848982885e-08f,
1950 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
1951 7.246299848982885e-08f
1952 };
1953 break;
1954 }
1955 }
1956
1957 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
1958 outputData, inputData, axis);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001959}
1960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001961LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962 armnn::IWorkloadFactory& workloadFactory,
1963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001966 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001967}
1968
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001969LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970 armnn::IWorkloadFactory& workloadFactory,
1971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972 float beta)
1973{
Francis Murtagh07f21212019-07-23 09:50:50 +01001974 Simple3dSoftmaxOutputData data;
1975 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001977}
1978
1979LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982 float beta)
1983{
Francis Murtagh07f21212019-07-23 09:50:50 +01001984 Simple4dSoftmaxData data;
1985
1986 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001988}
1989
nikraj01248683f2019-05-29 16:46:50 +01001990LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991 armnn::IWorkloadFactory& workloadFactory,
1992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993 float beta)
1994{
1995 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996}
1997
1998LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999 armnn::IWorkloadFactory& workloadFactory,
2000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001 float beta)
2002{
Francis Murtagh07f21212019-07-23 09:50:50 +01002003 Simple3dSoftmaxOutputData data;
2004 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002006}
2007
2008LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009 armnn::IWorkloadFactory& workloadFactory,
2010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011 float beta)
2012{
Francis Murtagh07f21212019-07-23 09:50:50 +01002013 Simple4dSoftmaxData data;
2014
2015 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002017}
2018
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002019LayerTestResult<float,4> CompareNormalizationTest(
2020 armnn::IWorkloadFactory& workloadFactory,
2021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022 armnn::IWorkloadFactory& refWorkloadFactory,
2023 armnn::NormalizationAlgorithmChannel normChannel,
2024 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00002025{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002026 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00002027}
2028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002029LayerTestResult<float,2> CompareSoftmaxTest(
2030 armnn::IWorkloadFactory& workloadFactory,
2031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002032 armnn::IWorkloadFactory& refWorkloadFactory,
2033 float beta)
2034{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002035 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002037}
2038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 armnn::IWorkloadFactory& refWorkloadFactory,
2043 float beta)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002047}
2048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002049std::vector<LayerTestResult<float,3>> SplitterTest(
2050 armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002052{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002053 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00002054}
2055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002056std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057 armnn::IWorkloadFactory& workloadFactory,
2058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002060 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002061}
2062
Ruomei Yan25339c32019-05-28 16:48:20 +01002063std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064 armnn::IWorkloadFactory& workloadFactory,
2065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066{
2067 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068}
2069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002070LayerTestResult<float, 3> CopyViaSplitterTest(
2071 armnn::IWorkloadFactory& workloadFactory,
2072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002074 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002075}
2076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002077LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078 armnn::IWorkloadFactory& workloadFactory,
2079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002081 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002082}
2083
Ruomei Yan25339c32019-05-28 16:48:20 +01002084LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085 armnn::IWorkloadFactory& workloadFactory,
2086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087{
2088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089}
2090
Jan Eilers38e05bd2019-06-26 13:10:09 +01002091void LstmUtilsZeroVectorTest()
2092{
2093 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095 {2., 3., 3., 4.}));
2096
2097 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098 {0., 0., 0., 0.}));
2099
2100 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101}
2102
2103void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104{
2105 uint32_t batchSize = 2;
2106 uint32_t vecSize = 4;
2107 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
2110 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
2111
2112 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2113 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
2114 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
2115
2116 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117 vecSize, batchSize, expectedOutput);
2118}
2119
2120void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121{
2122 uint32_t batchSize = 2;
2123 uint32_t vecSize = 4;
2124 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2127 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2128
2129 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2131 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2132
2133 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134 vecSize, batchSize, expectedOutput);
2135}
2136
2137void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138{
2139 uint32_t batchSize = 2;
2140 uint32_t vecSize = 4;
2141 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2144 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
2145
2146 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2148 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
2149
2150 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151 vecSize, batchSize, expectedOutput);
2152}
2153
2154
// Verifies VectorBatchVectorCwiseProduct: multiplies 'vector' element-wise
// into every row of 'batchVector'. The four batch rows are sign variations of
// the same magnitudes, so the expected outputs differ only in sign.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        { 1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
          /* batch 1 */
          -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f,   -10.1f,
          -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
          -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
          /* batch 2 */
          1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f,   -10.1f,
          11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
          21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
          /* batch 3 */
          -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f,   10.1f,
          -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
          -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expect output[b][i] = vector[i] * batchVector[b][i] (element-wise product,
    // no accumulation — e.g. 1.1 * 1.1 = 1.21).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
          59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
          172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
          368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
          637.562500f,  689.587585f,  743.652954f,  799.758423f,  0.000000f,
          /* batch 1 */
          -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
          -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
          -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
          -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
          -637.562500f, -689.587585f, -743.652954f, -799.758423f,  0.000000f,
          /* batch 2 */
          1.210000f,   -4.840000f,   10.889999f,  -19.360001f,   30.250000f,  -43.559998f,
          59.289997f,  -77.440002f,   98.009995f, -102.010010f,  123.432091f, -146.894394f,
          172.396896f, -199.939606f,  229.522491f, -261.145599f,  294.808899f, -330.512421f,
          368.256134f, -408.040039f,  449.864075f, -493.728363f,  539.632874f, -587.577576f,
          637.562500f, -689.587585f,  743.652954f, -799.758423f,  0.000000f,
          /* batch 3 */
          -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
          -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
          -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
          -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
          -637.562500f,  689.587585f, -743.652954f,  799.758423f,  0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
2214
2215
2216void LstmUtilsVectorBatchVectorAddTest()
2217{
2218 uint32_t batchSize = 2;
2219 uint32_t vecSize = 3;
2220 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222 { 0.0f, -0.5f, 1.0f}));
2223
2224 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226 { 1.0f, 2.0f, 3.0f, //batch 0
2227 4.0f, 5.0f, 6.0f})); //batch 1
2228
2229 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230 { 1.0f, 1.5f, 4.0f,
2231 4.0f, 4.5f, 7.0f}));
2232
2233 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234 vecSize, batchSize, expectedOutput);
2235}
2236
2237
telsoa01c577f2c2018-08-31 09:22:23 +01002238LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002239 armnn::IWorkloadFactory& workloadFactory,
2240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002241{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002242 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002243 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2244 { 2., 3., 3., 4. }));
2245
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002246 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002247 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2248 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2249 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002250 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002251 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002252}
2253
2254LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01002255 armnn::IWorkloadFactory& workloadFactory,
2256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002257{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002258 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002259 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2260 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2261 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2262
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002263 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002264 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2265 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2266 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2267 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2268 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2269 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2270 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
2271 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002272 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2273 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002274}
2275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002276LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2277 armnn::IWorkloadFactory& workloadFactory,
2278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002279{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002280 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002281 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2282 {2., 3., 3., 4.}));
2283
2284
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002285 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002286 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2287 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2288 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
2289
Conor Kennedyb9971c92019-05-07 07:14:23 +01002290 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002291 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002292}
2293
Jan Eilers38e05bd2019-06-26 13:10:09 +01002294
2295LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2296 armnn::IWorkloadFactory& workloadFactory,
2297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298{
2299 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2300 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2301 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
2302 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
2303
2304 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2305 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2306 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
2307 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
2308 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2309 workloadFactory, memoryManager, input, expectedOutput);
2310}
2311
2312
Conor Kennedyb9971c92019-05-07 07:14:23 +01002313LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2314 armnn::IWorkloadFactory& workloadFactory,
2315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2316{
2317 const float qScale = 1.0f;
2318 const int32_t qOffset = 0;
2319
2320 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2321 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2322
2323 armnn::TensorInfo inputDesc({2, 2}, datatype);
2324 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2325 std::vector<float>{2., 3., 3., 4.}));
2326
2327 armnn::TensorInfo outputDesc({2, 4}, datatype);
2328 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2329 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2330 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2331
2332 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2333 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2334
2335}
2336
2337LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2338 armnn::IWorkloadFactory& workloadFactory,
2339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2340{
2341 const float qScale = 1.0f;
2342 const int32_t qOffset = 0;
2343
2344 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2345 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2346
2347 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2348 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2349 std::vector<float>({ 2., 3., 3., 4. })));
2350
2351 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2352 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2353 qOffset, std::vector<float>(
2354 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2355 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2356
2357 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2358 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2359}
2360
2361LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2362 armnn::IWorkloadFactory& workloadFactory,
2363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2364{
2365 const float qScale = 2.0f;
2366 const int32_t qOffset = 0;
2367
2368 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2369 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2370
2371 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2372 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2373 qOffset, std::vector<float>(
2374 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2375 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2376
2377 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2378 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2379 qOffset, std::vector<float>(
2380 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2381 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2382 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2383 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2384 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2385 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
2386
2387 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2388 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2389}
2390
2391LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2392 armnn::IWorkloadFactory& workloadFactory,
2393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2394{
2395 const float qScale = 1.0f;
2396 const int32_t qOffset = 0;
2397
2398 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2399
2400 armnn::TensorInfo inputDesc({2, 2}, datatype);
2401 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2402 qOffset, std::vector<float>{2., 3., 3., 4.}));
2403
2404 armnn::TensorInfo outputDesc({2, 4}, datatype);
2405 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2406 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2407 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2408
2409 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2410 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2411}
2412
James Conroy9c3cae82019-08-01 16:01:48 +01002413// QuantizedLstm
2414LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2415 armnn::IWorkloadFactory& workloadFactory,
2416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2417{
2418 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2419 boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2420 {166, 179, 50, 150}));
2421
2422 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2423 boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2424 {140, 151, 146, 112, 136, 156, 142, 112 }));
2425
2426 return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2427}
2428
// Concatenates a 2-channel and a 1-channel [C,H,W] Float32 tensor along the
// channel axis into a 3x6x3 output. Where the backend supports sub-tensors,
// the inputs are written directly into views of the output tensor; otherwise
// separate input tensors are used and the Concat workload copies the data.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: the two channels of input1 (values 1..36) followed by
    // the single channel of input2 (values 37..54).
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    // View origins locate each input inside the output tensor: input1 starts
    // at channel 0, input2 at channel 2 (after input1's two channels).
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If supported, create the input handles as sub-tensor views into the
    // output handle at the window origins; otherwise as standalone tensors.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocate before copying data in, and configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
2551
// Element-wise addition of two 2x2x2x3 Float32 tensors via the Addition
// workload; the result is checked against the precomputed element-wise sum.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels  = 2;
    unsigned int height    = 2;
    unsigned int width     = 3;

    // Both inputs and the output share the same shape and data type.
    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output: input1 + input2, element by element.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in, and configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2643
// Addition with broadcasting: adds a [1,3,2,1] tensor to a [1,1,2,3] tensor,
// producing a [1,3,2,3] output. Templated on the data type; qScale/qOffset
// are applied to all tensor infos when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply to quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: input1 broadcast along the last axis plus input2
    // broadcast along the second axis.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in, and configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2722
// Addition broadcasting a single-element [1,1,1,1] tensor over a [1,3,2,3]
// tensor (adds 0.5 to every element). Templated on the data type;
// qScale/qOffset are applied to all tensor infos when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply to quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single broadcast value.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: every element of input1 plus 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in, and configure before executing.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2796
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002797LayerTestResult<float, 4> AdditionBroadcastTest(
2798 armnn::IWorkloadFactory& workloadFactory,
2799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002800{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002801 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2802 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002803}
2804
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002805LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2806 armnn::IWorkloadFactory& workloadFactory,
2807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002808{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002809 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2810 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002811}
2812
Sadik Armagan2999a022019-04-09 14:20:12 +01002813LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2814 armnn::IWorkloadFactory& workloadFactory,
2815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2816{
2817 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2818 workloadFactory, memoryManager, 2.f, 0);
2819}
2820
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002821LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2822 armnn::IWorkloadFactory& workloadFactory,
2823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002824{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002825 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2826 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002827}
2828
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002829LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2830 armnn::IWorkloadFactory& workloadFactory,
2831 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002832{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002833 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2834 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002835}
2836
Sadik Armagan2999a022019-04-09 14:20:12 +01002837LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2838 armnn::IWorkloadFactory& workloadFactory,
2839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2840{
2841 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2842 workloadFactory, memoryManager, 0.1333333f, 0);
2843}
2844
// Cross-validates an Addition workload from 'workloadFactory' against the same
// workload built by 'refWorkloadFactory': both are fed identical random inputs
// and the two outputs are returned as actual vs. expected for comparison.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // Fixed 4D (N,C,H,W) problem size — 4x1x2x3.
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    // Both inputs and the output share the same Float32 shape (no broadcast).
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Distinct seeds so the two operands differ from each other, but the test
    // itself stays deterministic run-to-run.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Tensor handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and a parallel set for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor/info, then repoint the slots at the reference
    // handles so both workloads describe the same computation.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    // Handles must be allocated before any data copy or execution.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Same input bytes go to both backends.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // Backend-under-test result becomes 'output'; reference result becomes
    // 'outputExpected', so the caller's comparison validates the backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2914
namespace {
// Runs a single Division workload end-to-end: builds quantization-aware tensor
// infos for both inputs and the output, executes the workload on the given
// factory, and returns actual vs. expected results for comparison.
//
// shape0/values0/scale0/offset0 describe input 0; shape1/... input 1;
// outShape/outValues/outScale/outOffset the expected output. All tensors are
// rank-4 and use the same ArmnnType.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Quantization parameters are set unconditionally; for float types the
    // scale/offset are simply ignored by the backend.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Allocate before copying data in; required by the handle contract.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must precede Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
2979
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002980LayerTestResult<float,4> DivisionByZeroTest(
2981 armnn::IWorkloadFactory& workloadFactory,
2982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002983{
2984 const unsigned int width = 2;
2985 const unsigned int height = 2;
2986 const unsigned int channelCount = 2;
2987 const unsigned int batchSize = 2;
2988
2989 unsigned int shape[] = { batchSize, channelCount, height, width };
2990
2991 std::vector<float> input0({
2992 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2993 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2994
2995 std::vector<float> input1({
2996 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2997 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2998
2999 std::vector<float> output({
3000 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
3001 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
3002
Sadik Armagan2999a022019-04-09 14:20:12 +01003003 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3004 memoryManager,
3005 shape, input0, 1.0f, 0,
3006 shape, input1, 1.0f, 0,
3007 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01003008}
3009
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003010LayerTestResult<float,4> DivisionTest(
3011 armnn::IWorkloadFactory& workloadFactory,
3012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003013{
3014 const unsigned int width = 2;
3015 const unsigned int height = 2;
3016 const unsigned int channelCount = 2;
3017 const unsigned int batchSize = 2;
3018
3019 unsigned int shape[] = { batchSize, channelCount, height, width };
3020
3021 std::vector<float> input0({
3022 2, 2, 2, 2, 3, 3, 3, 3,
3023 4, 4, 4, 4, 5, 5, 5, 5 });
3024
3025 std::vector<float> input1({
3026 1, 1, 1, 1, 2, 2, 2, 2,
3027 4, 4, 4, 4, 4, 4, 4, 4 });
3028
3029 std::vector<float> output({
3030 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
3031 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
3032
David Beck5cd01f32018-09-12 16:00:08 +01003033
Sadik Armagan2999a022019-04-09 14:20:12 +01003034 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3035 memoryManager,
3036 shape, input0, 1.0f, 0,
3037 shape, input1, 1.0f, 0,
3038 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003039}
3040
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003041LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3042 armnn::IWorkloadFactory& workloadFactory,
3043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003044{
3045 unsigned int shape0[] = { 1, 2, 2, 2 };
3046 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3047
3048 unsigned int shape1[] = { 1, 1, 1, 1 };
3049 std::vector<float> input1({ 2 });
3050
3051 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3052
David Beck5cd01f32018-09-12 16:00:08 +01003053
Sadik Armagan2999a022019-04-09 14:20:12 +01003054 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3055 memoryManager,
3056 shape0, input0, 1.0f, 0,
3057 shape1, input1, 1.0f, 0,
3058 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003059}
3060
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003061LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3062 armnn::IWorkloadFactory& workloadFactory,
3063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003064{
3065 unsigned int shape0[] = { 1, 3, 3, 2 };
3066 std::vector<float> input0({
3067 1, 4, 3, 8, 5, 12,
3068 7, 16, 9, 20, 11, 24,
3069 13, 28, 15, 32, 17, 36});
3070
3071 unsigned int shape1[] = { 1, 1, 1, 2 };
3072 std::vector<float> input1({ 1, 2 });
3073
3074 std::vector<float> output({
3075 1, 2, 3, 4, 5, 6,
3076 7, 8, 9, 10, 11, 12,
3077 13, 14, 15, 16, 17, 18});
3078
Sadik Armagan2999a022019-04-09 14:20:12 +01003079 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3080 memoryManager,
3081 shape0, input0, 1.0f, 0,
3082 shape1, input1, 1.0f, 0,
3083 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003084}
3085
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003086LayerTestResult<uint8_t,4> DivisionUint8Test(
3087 armnn::IWorkloadFactory& workloadFactory,
3088 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003089{
3090 const unsigned int width = 2;
3091 const unsigned int height = 2;
3092 const unsigned int channelCount = 2;
3093 const unsigned int batchSize = 2;
3094
3095 unsigned int shape[] = { batchSize, channelCount, height, width };
3096
3097 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3098 4, 4, 4, 4, 5, 5, 5, 5 });
3099
3100 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3101 4, 4, 4, 4, 4, 4, 4, 4 });
3102
3103 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3104 4, 4, 4, 4, 5, 5, 5, 5});
3105
3106
Sadik Armagan2999a022019-04-09 14:20:12 +01003107 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3108 memoryManager,
3109 shape, input0, 1.0f, 0,
3110 shape, input1, 1.0f, 0,
3111 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003112}
3113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003114LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3115 armnn::IWorkloadFactory& workloadFactory,
3116 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003117{
3118 unsigned int shape0[] = { 1, 2, 2, 2 };
3119 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3120
3121 unsigned int shape1[] = { 1, 1, 1, 1 };
3122 std::vector<uint8_t> input1({ 2 });
3123
3124 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3125
Sadik Armagan2999a022019-04-09 14:20:12 +01003126 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3127 memoryManager,
3128 shape0, input0, 1.0f, 0,
3129 shape1, input1, 1.0f, 0,
3130 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003131}
3132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003133LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3134 armnn::IWorkloadFactory& workloadFactory,
3135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003136{
3137 unsigned int shape0[] = { 1, 3, 3, 2 };
3138 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
3139 7, 16, 9, 20, 11, 24,
3140 13, 28, 15, 32, 17, 36});
3141
3142 unsigned int shape1[] = { 1, 1, 1, 2 };
3143 std::vector<uint8_t> input1({ 1, 2 });
3144
3145 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
3146 7, 8, 9, 10, 11, 12,
3147 13, 14, 15, 16, 17, 18});
3148
Sadik Armagan2999a022019-04-09 14:20:12 +01003149 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3150 memoryManager,
3151 shape0, input0, 1.0f, 0,
3152 shape1, input1, 1.0f, 0,
3153 shape0, output, 1.0f, 0);
3154}
3155
3156LayerTestResult<int16_t,4> DivisionInt16Test(
3157 armnn::IWorkloadFactory& workloadFactory,
3158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3159{
3160 unsigned int shape[] = { 2, 2, 2, 2 };
3161
3162 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3163 4, 4, 4, 4, 5, 5, 5, 5 });
3164
3165 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3166 4, 4, 4, 4, 4, 4, 4, 4 });
3167
3168 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3169 4, 4, 4, 4, 5, 5, 5, 5});
3170
3171
3172 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3173 memoryManager,
3174 shape, input0, 1.0f, 0,
3175 shape, input1, 1.0f, 0,
3176 shape, output, 0.25f, 0);
3177}
3178
3179LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3180 armnn::IWorkloadFactory& workloadFactory,
3181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3182{
3183 unsigned int shape0[] = { 1, 2, 2, 2 };
3184 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3185
3186 unsigned int shape1[] = { 1, 1, 1, 1 };
3187 std::vector<int16_t> input1({ 2 });
3188
3189 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3190
3191 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3192 memoryManager,
3193 shape0, input0, 1.0f, 0,
3194 shape1, input1, 1.0f, 0,
3195 shape0, output, 1.0f, 0);
3196}
3197
3198LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3199 armnn::IWorkloadFactory& workloadFactory,
3200 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3201{
3202 unsigned int shape0[] = { 1, 3, 3, 2 };
3203 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
3204 7, 16, 9, 20, 11, 24,
3205 13, 28, 15, 32, 17, 36});
3206
3207 unsigned int shape1[] = { 1, 1, 1, 2 };
3208 std::vector<int16_t> input1({ 1, 2 });
3209
3210 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
3211 7, 8, 9, 10, 11, 12,
3212 13, 14, 15, 16, 17, 18});
3213
3214 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3215 memoryManager,
3216 shape0, input0, 1.0f, 0,
3217 shape1, input1, 1.0f, 0,
3218 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003219}
3220
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003221template<typename DescriptorType>
3222std::unique_ptr<armnn::IWorkload> CreateWorkload(
3223 const armnn::IWorkloadFactory& workloadFactory,
3224 const armnn::WorkloadInfo& info,
3225 const DescriptorType& descriptor)
3226{
3227 return CreateWorkload(workloadFactory, info, descriptor);
3228};
3229
3230template<>
3231std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
3232 const armnn::IWorkloadFactory& workloadFactory,
3233 const armnn::WorkloadInfo& info,
3234 const armnn::MaximumQueueDescriptor& descriptor)
3235{
3236 return workloadFactory.CreateMaximum(descriptor, info);
3237}
3238
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003239template<>
3240std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
3241 const armnn::IWorkloadFactory& workloadFactory,
3242 const armnn::WorkloadInfo& info,
3243 const armnn::MinimumQueueDescriptor& descriptor)
3244{
3245 return workloadFactory.CreateMinimum(descriptor, info);
3246}
3247
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003248template<>
3249std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
3250 const armnn::IWorkloadFactory& workloadFactory,
3251 const armnn::WorkloadInfo& info,
3252 const armnn::EqualQueueDescriptor& descriptor)
3253{
3254 return workloadFactory.CreateEqual(descriptor, info);
3255}
3256
FrancisMurtagh878f0232018-12-19 10:56:15 +00003257template<>
3258std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
3259 const armnn::IWorkloadFactory& workloadFactory,
3260 const armnn::WorkloadInfo& info,
3261 const armnn::GreaterQueueDescriptor& descriptor)
3262{
3263 return workloadFactory.CreateGreater(descriptor, info);
3264}
3265
namespace {

// Generic driver for binary element-wise workloads (Maximum/Minimum/Equal/
// Greater via the CreateWorkload<Descriptor> specializations above). Supports
// a distinct output type so comparison ops can produce Boolean tensors from
// numeric inputs. Quantization parameters (qScale/qOffset) are applied to all
// three tensor infos, but only when TInput is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const uint32_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    // NOTE(review): tensors are built before the quantization parameters are
    // set on the infos below — presumably MakeTensor only needs the shape;
    // confirm against its implementation before reordering.
    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs are compared as 0/non-zero rather than exact values.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    // Dispatch to the per-descriptor CreateWorkload specialization.
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate and before execution.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}
3334
// Convenience overload for the common case where the input and output tensors
// share the same data type: forwards to the two-type helper with
// ArmnnTypeOutput == ArmnnTypeInput.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
} // anonymous namespace
3357
3358LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3359 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003360{
3361 const unsigned int width = 2;
3362 const unsigned int height = 2;
3363 const unsigned int channelCount = 2;
3364 const unsigned int batchSize = 2;
3365
3366 unsigned int shape[] = { batchSize, channelCount, height, width };
3367
3368 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3369 3, 3, 3, 3, 4, 4, 4, 4 });
3370
3371 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3372 5, 5, 5, 5, 4, 4, 4, 4 });
3373
kevmay012b4d88e2019-01-24 14:05:09 +00003374 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3375 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003376
kevmay012b4d88e2019-01-24 14:05:09 +00003377 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003378 workloadFactory,
3379 memoryManager,
3380 shape,
3381 input0,
3382 shape,
3383 input1,
3384 shape,
3385 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003386}
3387
kevmay012b4d88e2019-01-24 14:05:09 +00003388LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003389 armnn::IWorkloadFactory& workloadFactory,
3390 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3391{
3392 unsigned int shape0[] = { 1, 2, 2, 2 };
3393 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3394
3395 unsigned int shape1[] = { 1, 1, 1, 1 };
3396 std::vector<float> input1({ 1 });
3397
kevmay012b4d88e2019-01-24 14:05:09 +00003398 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003399
kevmay012b4d88e2019-01-24 14:05:09 +00003400 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003401 workloadFactory,
3402 memoryManager,
3403 shape0,
3404 input0,
3405 shape1,
3406 input1,
3407 shape0,
3408 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003409}
3410
kevmay012b4d88e2019-01-24 14:05:09 +00003411LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003412 armnn::IWorkloadFactory& workloadFactory,
3413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3414{
3415 const unsigned int shape0[] = { 1, 2, 2, 3 };
3416 const unsigned int shape1[] = { 1, 1, 1, 3 };
3417
3418 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3419 7, 8, 9, 10, 11, 12 });
3420
3421 std::vector<float> input1({ 1, 2, 3});
3422
kevmay012b4d88e2019-01-24 14:05:09 +00003423 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3424 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003425
kevmay012b4d88e2019-01-24 14:05:09 +00003426 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003427 workloadFactory,
3428 memoryManager,
3429 shape0,
3430 input0,
3431 shape1,
3432 input1,
3433 shape0,
3434 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003435}
3436
3437LayerTestResult<uint8_t, 4> EqualUint8Test(
3438 armnn::IWorkloadFactory& workloadFactory,
3439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3440{
3441 unsigned int shape[] = { 2, 2, 2, 2 };
3442
3443 // See dequantized values to the right.
3444 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003445 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003446
3447 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3448 3, 3, 3, 3, 5, 5, 5, 5 });
3449
3450 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3451 1, 1, 1, 1, 0, 0, 0, 0 });
3452
kevmay012b4d88e2019-01-24 14:05:09 +00003453 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3454 armnn::DataType::QuantisedAsymm8,
3455 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003456 workloadFactory,
3457 memoryManager,
3458 shape,
3459 input0,
3460 shape,
3461 input1,
3462 shape,
3463 output,
3464 1.0f,
3465 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003466}
3467
3468LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3469 armnn::IWorkloadFactory& workloadFactory,
3470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3471{
3472 const unsigned int shape0[] = { 1, 2, 2, 3 };
3473 const unsigned int shape1[] = { 1, 1, 1, 1 };
3474
3475 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3476 7, 8, 9, 10, 11, 12 });
3477
3478 std::vector<uint8_t> input1({ 1 });
3479
3480 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3481 0, 0, 0, 0, 0, 0 });
3482
kevmay012b4d88e2019-01-24 14:05:09 +00003483 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3484 armnn::DataType::QuantisedAsymm8,
3485 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003486 workloadFactory,
3487 memoryManager,
3488 shape0,
3489 input0,
3490 shape1,
3491 input1,
3492 shape0,
3493 output,
3494 1.0f,
3495 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003496}
3497
3498LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3499 armnn::IWorkloadFactory& workloadFactory,
3500 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3501{
3502 const unsigned int shape0[] = { 1, 2, 2, 3 };
3503 const unsigned int shape1[] = { 1, 1, 1, 3 };
3504
3505 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3506 7, 8, 9, 10, 11, 12 });
3507
3508 std::vector<uint8_t> input1({ 1, 1, 3});
3509
3510 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3511 0, 0, 0, 0, 0, 0 });
3512
kevmay012b4d88e2019-01-24 14:05:09 +00003513 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3514 armnn::DataType::QuantisedAsymm8,
3515 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003516 workloadFactory,
3517 memoryManager,
3518 shape0,
3519 input0,
3520 shape1,
3521 input1,
3522 shape0,
3523 output,
3524 1.0f,
3525 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003526}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003527
kevmay012b4d88e2019-01-24 14:05:09 +00003528LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3530{
3531 const unsigned int width = 2;
3532 const unsigned int height = 2;
3533 const unsigned int channelCount = 2;
3534 const unsigned int batchSize = 2;
3535
3536 unsigned int shape[] = { batchSize, channelCount, height, width };
3537
3538 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3539 3, 3, 3, 3, 4, 4, 4, 4 });
3540
3541 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3542 5, 5, 5, 5, 4, 4, 4, 4 });
3543
kevmay012b4d88e2019-01-24 14:05:09 +00003544 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3545 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003546
kevmay012b4d88e2019-01-24 14:05:09 +00003547 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003548 workloadFactory,
3549 memoryManager,
3550 shape,
3551 input0,
3552 shape,
3553 input1,
3554 shape,
3555 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003556}
3557
kevmay012b4d88e2019-01-24 14:05:09 +00003558LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003559 armnn::IWorkloadFactory& workloadFactory,
3560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3561{
3562 unsigned int shape0[] = { 1, 2, 2, 2 };
3563 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3564
3565 unsigned int shape1[] = { 1, 1, 1, 1 };
3566 std::vector<float> input1({ 1 });
3567
kevmay012b4d88e2019-01-24 14:05:09 +00003568 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003569
kevmay012b4d88e2019-01-24 14:05:09 +00003570 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003571 workloadFactory,
3572 memoryManager,
3573 shape0,
3574 input0,
3575 shape1,
3576 input1,
3577 shape0,
3578 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003579}
3580
kevmay012b4d88e2019-01-24 14:05:09 +00003581LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003582 armnn::IWorkloadFactory& workloadFactory,
3583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3584{
3585 const unsigned int shape0[] = { 1, 2, 2, 3 };
3586 const unsigned int shape1[] = { 1, 1, 1, 3 };
3587
3588 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3589 7, 8, 9, 10, 11, 12 });
3590
3591 std::vector<float> input1({ 1, 3, 2});
3592
kevmay012b4d88e2019-01-24 14:05:09 +00003593 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3594 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003595
kevmay012b4d88e2019-01-24 14:05:09 +00003596 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003597 workloadFactory,
3598 memoryManager,
3599 shape0,
3600 input0,
3601 shape1,
3602 input1,
3603 shape0,
3604 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003605}
3606
3607LayerTestResult<uint8_t, 4> GreaterUint8Test(
3608 armnn::IWorkloadFactory& workloadFactory,
3609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3610{
3611 unsigned int shape[] = { 2, 2, 2, 2 };
3612
3613 // See dequantized values to the right.
3614 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3615 3, 3, 3, 3, 5, 5, 5, 5 });
3616
3617 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3618 2, 2, 2, 2, 5, 5, 5, 5 });
3619
3620 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3621 1, 1, 1, 1, 0, 0, 0, 0 });
3622
kevmay012b4d88e2019-01-24 14:05:09 +00003623 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3624 armnn::DataType::QuantisedAsymm8,
3625 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003626 workloadFactory,
3627 memoryManager,
3628 shape,
3629 input0,
3630 shape,
3631 input1,
3632 shape,
3633 output,
3634 1.0f,
3635 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003636}
3637
3638LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3639 armnn::IWorkloadFactory& workloadFactory,
3640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3641{
3642 const unsigned int shape0[] = { 1, 2, 2, 3 };
3643 const unsigned int shape1[] = { 1, 1, 1, 1 };
3644
3645 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3646 7, 8, 9, 10, 11, 12 });
3647
3648 std::vector<uint8_t> input1({ 1 });
3649
3650 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3651 1, 1, 1, 1, 1, 1 });
3652
kevmay012b4d88e2019-01-24 14:05:09 +00003653 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3654 armnn::DataType::QuantisedAsymm8,
3655 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003656 workloadFactory,
3657 memoryManager,
3658 shape0,
3659 input0,
3660 shape1,
3661 input1,
3662 shape0,
3663 output,
3664 1.0f,
3665 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003666}
3667
3668LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3669 armnn::IWorkloadFactory& workloadFactory,
3670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3671{
3672 const unsigned int shape0[] = { 1, 2, 2, 3 };
3673 const unsigned int shape1[] = { 1, 1, 1, 3 };
3674
3675 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3676 7, 8, 9, 10, 11, 12 });
3677
3678 std::vector<uint8_t> input1({ 1, 1, 3});
3679
3680 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3681 1, 1, 1, 1, 1, 1 });
3682
kevmay012b4d88e2019-01-24 14:05:09 +00003683 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3684 armnn::DataType::QuantisedAsymm8,
3685 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003686 workloadFactory,
3687 memoryManager,
3688 shape0,
3689 input0,
3690 shape1,
3691 input1,
3692 shape0,
3693 output,
3694 1.0f,
3695 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003696}
3697
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003698LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3700{
3701 const unsigned int width = 2;
3702 const unsigned int height = 2;
3703 const unsigned int channelCount = 2;
3704 const unsigned int batchSize = 2;
3705
3706 unsigned int shape[] = { batchSize, channelCount, height, width };
3707
3708 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3709 3, 3, 3, 3, 4, 4, 4, 4 });
3710
3711 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3712 4, 4, 4, 4, 5, 5, 5, 5 });
3713
3714 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3715 4, 4, 4, 4, 5, 5, 5, 5 });
3716
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003717 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3718 workloadFactory,
3719 memoryManager,
3720 shape,
3721 input0,
3722 shape,
3723 input1,
3724 shape,
3725 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003726}
3727
3728LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3729 armnn::IWorkloadFactory& workloadFactory,
3730 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3731{
3732 unsigned int shape0[] = { 1, 2, 2, 2 };
3733 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3734
3735 unsigned int shape1[] = { 1, 1, 1, 1 };
3736 std::vector<float> input1({ 2 });
3737
3738 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3739
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003740 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3741 workloadFactory,
3742 memoryManager,
3743 shape0,
3744 input0,
3745 shape1,
3746 input1,
3747 shape0,
3748 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003749}
3750
3751LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3752 armnn::IWorkloadFactory& workloadFactory,
3753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3754{
3755 const unsigned int shape0[] = { 1, 2, 2, 3 };
3756 const unsigned int shape1[] = { 1, 1, 1, 3 };
3757
3758 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3759 7, 8, 9, 10, 11, 12 });
3760
3761 std::vector<float> input1({ 1, 2, 3});
3762
3763 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003764 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003765
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003766 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3767 workloadFactory,
3768 memoryManager,
3769 shape0,
3770 input0,
3771 shape1,
3772 input1,
3773 shape0,
3774 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003775}
3776
3777LayerTestResult<uint8_t, 4> MaximumUint8Test(
3778 armnn::IWorkloadFactory& workloadFactory,
3779 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3780{
3781 unsigned int shape[] = { 2, 2, 2, 2 };
3782
3783 // See dequantized values to the right.
3784 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3785 3, 3, 3, 3, 4, 4, 4, 4 });
3786
3787 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3788 4, 4, 4, 4, 5, 5, 5, 5 });
3789
3790 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3791 4, 4, 4, 4, 5, 5, 5, 5 });
3792
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003793 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3794 workloadFactory,
3795 memoryManager,
3796 shape,
3797 input0,
3798 shape,
3799 input1,
3800 shape,
3801 output,
3802 1.0f,
3803 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003804}
3805
3806LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3807 armnn::IWorkloadFactory& workloadFactory,
3808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3809{
3810 const unsigned int shape0[] = { 1, 2, 2, 3 };
3811 const unsigned int shape1[] = { 1, 1, 1, 1 };
3812
3813 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3814 7, 8, 9, 10, 11, 12 });
3815
3816 std::vector<uint8_t> input1({2});
3817
3818 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3819 7, 8, 9, 10, 11, 12 });
3820
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003821 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3822 workloadFactory,
3823 memoryManager,
3824 shape0,
3825 input0,
3826 shape1,
3827 input1,
3828 shape0,
3829 output,
3830 1.0f,
3831 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003832}
3833
3834LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3835 armnn::IWorkloadFactory& workloadFactory,
3836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3837{
3838 const unsigned int shape0[] = { 1, 2, 2, 3 };
3839 const unsigned int shape1[] = { 1, 1, 1, 3 };
3840
3841 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3842 7, 8, 9, 10, 11, 12 });
3843
3844 std::vector<uint8_t> input1({ 1, 10, 3});
3845
3846 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3847 7, 10, 9, 10, 11, 12 });
3848
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003849 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3850 workloadFactory,
3851 memoryManager,
3852 shape0,
3853 input0,
3854 shape1,
3855 input1,
3856 shape0,
3857 output,
3858 1.0f,
3859 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003860}
3861
Sadik Armagan2999a022019-04-09 14:20:12 +01003862LayerTestResult<int16_t, 4> MaximumInt16Test(
3863 armnn::IWorkloadFactory& workloadFactory,
3864 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3865{
3866 unsigned int shape[] = { 2, 2, 2, 2 };
3867
3868 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3869 3, 3, 3, 3, 4, 4, 4, 4 });
3870
3871 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3872 4, 4, 4, 4, 5, 5, 5, 5 });
3873
3874 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3875 4, 4, 4, 4, 5, 5, 5, 5 });
3876
3877 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3878 workloadFactory,
3879 memoryManager,
3880 shape,
3881 input0,
3882 shape,
3883 input1,
3884 shape,
3885 output,
3886 1.0f,
3887 0);
3888}
3889
3890LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3891 armnn::IWorkloadFactory& workloadFactory,
3892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3893{
3894 const unsigned int shape0[] = { 1, 2, 2, 3 };
3895 const unsigned int shape1[] = { 1, 1, 1, 1 };
3896
3897 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3898 7, 8, 9, 10, 11, 12 });
3899
3900 std::vector<int16_t> input1({2});
3901
3902 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3903 7, 8, 9, 10, 11, 12 });
3904
3905 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3906 workloadFactory,
3907 memoryManager,
3908 shape0,
3909 input0,
3910 shape1,
3911 input1,
3912 shape0,
3913 output,
3914 1.0f,
3915 0);
3916}
3917
3918LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3919 armnn::IWorkloadFactory& workloadFactory,
3920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3921{
3922 const unsigned int shape0[] = { 1, 2, 2, 3 };
3923 const unsigned int shape1[] = { 1, 1, 1, 3 };
3924
3925 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3926 7, 8, 9, 10, 11, 12 });
3927
3928 std::vector<int16_t> input1({ 1, 10, 3});
3929
3930 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3931 7, 10, 9, 10, 11, 12 });
3932
3933 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3934 workloadFactory,
3935 memoryManager,
3936 shape0,
3937 input0,
3938 shape1,
3939 input1,
3940 shape0,
3941 output,
3942 1.0f,
3943 0);
3944}
3945
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003946LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3947 armnn::IWorkloadFactory& workloadFactory,
3948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3949{
3950 unsigned int shape0[] = { 1, 2, 2, 2 };
3951 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3952
3953 unsigned int shape1[] = { 1, 1, 1, 1 };
3954 std::vector<float> input1({ 2 });
3955
3956 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3957
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003958 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3959 workloadFactory,
3960 memoryManager,
3961 shape0,
3962 input0,
3963 shape1,
3964 input1,
3965 shape0,
3966 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003967}
3968
3969
3970LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3971 armnn::IWorkloadFactory& workloadFactory,
3972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3973{
3974 unsigned int shape0[] = { 1, 2, 2, 2 };
3975 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3976
3977 unsigned int shape1[] = { 1, 1, 1, 1 };
3978 std::vector<float> input1({ 5 });
3979
3980 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3981
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003982 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3983 workloadFactory,
3984 memoryManager,
3985 shape0,
3986 input0,
3987 shape1,
3988 input1,
3989 shape0,
3990 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003991}
3992
3993LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3994 armnn::IWorkloadFactory & workloadFactory,
3995 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3996{
3997 const unsigned int shape0[] = { 1, 2, 2, 3 };
3998 const unsigned int shape1[] = { 1, 1, 1, 3 };
3999
4000 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
4001 7, 1, 2, 3, 4, 5 });
4002
4003 std::vector<uint8_t> input1({ 1, 2, 3});
4004
4005 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
4006 1, 1, 2, 1, 2, 3 });
4007
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004008 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
4009 workloadFactory,
4010 memoryManager,
4011 shape0,
4012 input0,
4013 shape1,
4014 input1,
4015 shape0,
4016 output,
4017 1.0f,
4018 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004019}
4020
Sadik Armagan2999a022019-04-09 14:20:12 +01004021LayerTestResult<int16_t, 4> MinimumInt16Test(
4022 armnn::IWorkloadFactory& workloadFactory,
4023 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4024{
4025 unsigned int shape[] = { 2, 2, 2, 2 };
4026
4027 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4028 3, 3, 3, 3, 4, 4, 4, 4 });
4029
4030 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4031 4, 4, 4, 4, 5, 5, 5, 5 });
4032
4033 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4034 3, 3, 3, 3, 4, 4, 4, 4 });
4035
4036 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4037 workloadFactory,
4038 memoryManager,
4039 shape,
4040 input0,
4041 shape,
4042 input1,
4043 shape,
4044 output,
4045 1.0f,
4046 0);
4047}
4048
4049LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4050 armnn::IWorkloadFactory& workloadFactory,
4051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4052{
4053 const unsigned int shape0[] = { 1, 2, 2, 3 };
4054 const unsigned int shape1[] = { 1, 1, 1, 1 };
4055
4056 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4057 7, 8, 9, 10, 11, 12 });
4058
4059 std::vector<int16_t> input1({2});
4060
4061 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4062 2, 2, 2, 2, 2, 2 });
4063
4064 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4065 workloadFactory,
4066 memoryManager,
4067 shape0,
4068 input0,
4069 shape1,
4070 input1,
4071 shape0,
4072 output,
4073 1.0f,
4074 0);
4075}
4076
4077LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4078 armnn::IWorkloadFactory& workloadFactory,
4079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4080{
4081 const unsigned int shape0[] = { 1, 2, 2, 3 };
4082 const unsigned int shape1[] = { 1, 1, 1, 3 };
4083
4084 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4085 7, 8, 9, 10, 11, 12 });
4086
4087 std::vector<int16_t> input1({ 1, 10, 3});
4088
4089 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4090 1, 8, 3, 1, 10, 3 });
4091
4092 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4093 workloadFactory,
4094 memoryManager,
4095 shape0,
4096 input0,
4097 shape1,
4098 input1,
4099 shape0,
4100 output,
4101 1.0f,
4102 0);
4103}
4104
namespace {
// Builds and runs a single Multiplication workload on the given factory and
// returns a LayerTestResult holding both the actual output and the
// caller-supplied expected values. All shapes are 4D; shape1 may differ from
// shape0 (e.g. for broadcast cases handled by the backend).
// NOTE(review): memoryManager is unused in this helper - presumably kept for
// signature uniformity with the other test entry points; confirm before removing.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const uint32_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Backend-specific tensor handles for both inputs and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the handles into the queue descriptor / workload info pair.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configuration step runs after allocation and before execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Expected values come straight from the caller; comparison happens upstream.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
4154
4155
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004156LayerTestResult<float,4> MultiplicationTest(
4157 armnn::IWorkloadFactory& workloadFactory,
4158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004159{
4160 const unsigned int width = 2;
4161 const unsigned int height = 2;
4162 const unsigned int channelCount = 2;
4163 const unsigned int batchSize = 2;
4164
4165 unsigned int shape[] = { batchSize, channelCount, height, width };
4166
4167 std::vector<float> input0({
4168 1, 1, 1, 1, 2, 2, 2, 2,
4169 3, 3, 3, 3, 4, 4, 4, 4 });
4170
4171 std::vector<float> input1({
4172 2, 2, 2, 2, 3, 3, 3, 3,
4173 4, 4, 4, 4, 5, 5, 5, 5 });
4174
4175 std::vector<float> output({
4176 2, 2, 2, 2, 6, 6, 6, 6,
4177 12, 12, 12, 12, 20, 20, 20, 20 });
4178
4179 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004180 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004181 shape,
4182 input0,
4183 shape,
4184 input1,
4185 shape,
4186 output);
4187}
4188
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004189LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4190 armnn::IWorkloadFactory& workloadFactory,
4191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004192{
4193 unsigned int shape0[] = { 1, 2, 2, 2 };
4194 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4195
4196 unsigned int shape1[] = { 1, 1, 1, 1 };
4197 std::vector<float> input1({ 2 });
4198
4199 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4200
4201 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004202 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004203 shape0,
4204 input0,
4205 shape1,
4206 input1,
4207 shape0,
4208 output);
4209}
4210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004211LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4212 armnn::IWorkloadFactory& workloadFactory,
4213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004214{
4215 unsigned int shape0[] = { 1, 3, 3, 2 };
4216 std::vector<float> input0({
4217 1, 2, 3, 4, 5, 6,
4218 7, 8, 9, 10, 11, 12,
4219 13, 14, 15, 16, 17, 18});
4220
4221 unsigned int shape1[] = { 1, 1, 1, 2 };
4222 std::vector<float> input1({ 1, 2 });
4223
4224 std::vector<float> output({
4225 1, 4, 3, 8, 5, 12,
4226 7, 16, 9, 20, 11, 24,
4227 13, 28, 15, 32, 17, 36});
4228
4229 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004230 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004231 shape0,
4232 input0,
4233 shape1,
4234 input1,
4235 shape0,
4236 output);
4237}
telsoa014fcda012018-03-09 14:13:49 +00004238
// Runs an identical, randomly-initialised Multiplication workload on two
// factories - the backend under test and a reference backend - and returns
// a LayerTestResult pairing the two outputs so the caller can check they agree.
// NOTE(review): memoryManager is unused here - presumably kept for signature
// uniformity with the other comparison tests; confirm before removing.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and a parallel set for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor starts as a copy and is re-pointed at the
    // reference backend's handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive the same input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // Actual result from the backend under test; "expected" from the reference.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
4308
// Runs an identical, randomly-initialised BatchNormalization workload on two
// factories - the backend under test and a reference backend - and returns
// a LayerTestResult pairing the two outputs so the caller can check they agree.
// NOTE(review): memoryManager is unused here - presumably kept for signature
// uniformity with the other comparison tests; confirm before removing.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // Per-channel parameters (mean/variance/beta/gamma) are 1D of length `channels`.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the random inputs deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // The extra 0.0f presumably bounds the random variance below - TODO confirm.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    // The four constant parameter tensors are shared by both workloads.
    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor starts as a copy and is re-pointed at the
    // reference backend's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive the same input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // Actual result from the backend under test; "expected" from the reference.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
4391
// Runs a Permute workload over `inputData` using `mappings`, writing the
// permuted values into `outputData` and overwriting `inputTensorInfo` with
// the permuted TensorInfo so the caller can chain further operations on it.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Shape/layout of the tensor after applying the permutation.
    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Hand the permuted TensorInfo back through the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
4435
Jim Flynn825af452019-05-20 12:49:28 +01004436armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004437 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4438 unsigned int concatDim)
4439{
telsoa014fcda012018-03-09 14:13:49 +00004440 std::vector<armnn::TensorShape> shapes;
4441 shapes.reserve(inputTensorInfos.size());
4442 for (const armnn::TensorInfo& it: inputTensorInfos)
4443 {
4444 shapes.push_back(it.GetShape());
4445 }
surmeh013537c2c2018-05-18 16:31:43 +01004446
Jim Flynn825af452019-05-20 12:49:28 +01004447 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4448 shapes.end(),
4449 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004450}
4451
//
// Concatenation is only supported for N and C dimensions for NCHW and the innermost dimension.
// In case of <4 dimensions we need to make sure that the concat dimensions are at least
// the 3rd slowest iterating one or the innermost dimension.
//
4457
4458bool NeedPermuteForConcat(
4459 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4460 unsigned int concatDim)
4461{
4462 // See note above. Additionally we expect the input shapes to have the
4463 // same number of dimensions.
4464 unsigned int nDimensions = 0;
4465
telsoa01c577f2c2018-08-31 09:22:23 +01004466 // Determine the number of dimensions as well as sanity check them
4467 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01004468 for (auto && tensorInfo : inputTensorInfos)
4469 {
4470 if (!nDimensions)
4471 {
4472 nDimensions = tensorInfo.GetShape().GetNumDimensions();
4473 }
4474 else
4475 {
4476 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
4477 "Input shapes must have the same number of dimensions");
4478 }
4479 }
4480
narpra015cdda352018-11-19 15:30:27 +00004481 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01004482}
4483
4484armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4485{
4486 unsigned int numDims = inputShape.GetNumDimensions();
4487 if (numDims >= 3)
4488 {
4489 // Nothing to do if the inputShape has at least 3 dimensions.
4490 return inputShape;
4491 }
4492
4493 std::vector<unsigned int> newDims(size_t(3), 1u);
4494 unsigned int expandedBy = 3 - numDims;
4495 for (unsigned int i=0; i<numDims; ++i)
4496 {
4497 newDims[expandedBy+i] = inputShape[i];
4498 }
4499 return armnn::TensorShape(3u, &newDims[0]);
4500}
4501
4502void Generate3dPermuteVectorForConcat(
4503 unsigned int numDimensions,
4504 unsigned int & concatDim,
4505 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4506{
4507 BOOST_ASSERT_MSG(numDimensions <= 3,
4508 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004509 unsigned int expandedBy = 3 - numDimensions;
4510 unsigned int expandedConcatAxis = concatDim + expandedBy;
4511
4512 if (expandedConcatAxis == 2)
4513 {
4514 concatDim = 0;
4515 armnn::PermutationVector forwardPermutation({1, 2, 0});
4516 armnn::PermutationVector reversePermutation({2, 0, 1});
4517 permutations = std::make_pair(forwardPermutation, reversePermutation);
4518 }
4519 else if (expandedConcatAxis == 1)
4520 {
4521 concatDim = 0;
4522 armnn::PermutationVector forwardPermutation({2, 0, 1});
4523 armnn::PermutationVector reversePermutation({1, 2, 0});
4524 permutations = std::make_pair(forwardPermutation, reversePermutation);
4525 }
4526 else
4527 {
4528 BOOST_ASSERT(expandedConcatAxis == 0);
4529 concatDim = 0;
4530 }
4531}
4532
//
// Permutes the input tensors so that a supported concatenation can be performed.
// Tensors with fewer than 3 dimensions are also treated as 3d by adding dummy
// 1-sized dimensions at the front. Finally, this function reports what the
// output shape of the permuted, concatenated tensor is going to be.
//
// Permutes every input so the concat axis becomes dimension 0 (expanding
// sub-3D tensors to 3D first).
//
// In/out parameters:
//   inputTensorInfos  - replaced with the expanded, permuted tensor infos.
//   inputData         - each pointer is redirected to the permuted copy.
//   inputDataStorage  - owns the permuted copies (kept alive by the caller).
//   permuteVector     - receives the REVERSE permutation, used later to
//                       restore the concatenated output.
//   concatDim         - updated to the post-permutation concat axis (0).
//   outputTensorInfo  - its shape is rewritten to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the (shared) permutation from its rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D, then permute the data into the storage vector and
        // point the caller's data pointer at the permuted copy.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape undergoes the same expansion + forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4601
4602
//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
4608template <typename T>
4609void PermuteOutputForConcat(
4610 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004612 const armnn::TensorInfo & tensorInfo,
4613 const armnn::PermutationVector & permuteVector,
4614 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4615 T * data)
4616{
4617 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4618 if (data == nullptr)
4619 {
4620 // Nullptr is an error in the test. By returning without doing the permutation
4621 // I expect the caller to fail the test. It still makes sense to report this as
4622 // an assert for Debug builds.
4623 return;
4624 }
4625
4626 armnn::TensorInfo resultTensorInfo = tensorInfo;
4627 std::vector<T> inputData(tensorInfo.GetNumElements());
4628 std::vector<T> outputData;
4629
4630 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4631
4632 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004633 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004634 permuteVector,
4635 resultTensorInfo,
4636 &inputData[0],
4637 outputData);
4638
4639 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4640}
4641
4642template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004643void Concatenate(
4644 armnn::IWorkloadFactory& workloadFactory,
4645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4646 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4647 std::initializer_list<T *> inputsOrig,
4648 const armnn::TensorInfo& outputTensorInfoOrig,
4649 T * output,
narpra015cdda352018-11-19 15:30:27 +00004650 unsigned int concatDim,
4651 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004652{
4653 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4654 if (output == nullptr)
4655 {
4656 // Nullptr is an error in the test. By returning without doing the permutation
4657 // I expect the caller to fail the test. It still makes sense to report this as
4658 // an assert for Debug builds.
4659 return;
4660 }
4661
telsoa01c577f2c2018-08-31 09:22:23 +01004662 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004663 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4664 std::vector<T *> inputs = inputsOrig;
4665 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4666
4667 armnn::PermutationVector permuteVector{0, 1, 2};
4668
telsoa01c577f2c2018-08-31 09:22:23 +01004669 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004670 std::vector<std::vector<T>> tmpInputDataStorage;
4671
4672 const size_t inputCount = inputTensorInfos.size();
4673
4674 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4675
4676 if (needPermuteForConcat)
4677 {
4678 //
4679 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004680 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004681 //
4682 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004683 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004684 inputTensorInfos,
4685 inputs,
4686 tmpInputDataStorage,
4687 permuteVector,
4688 concatDim,
4689 outputTensorInfo);
4690 }
4691
narpra015cdda352018-11-19 15:30:27 +00004692 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004693
4694 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4695 inputHandles.reserve(inputCount);
4696
narpra015cdda352018-11-19 15:30:27 +00004697 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4698
Jim Flynne242f2d2019-05-22 14:24:13 +01004699 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004700 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004701 queueDescriptor.m_Parameters = viewsDescriptor;
4702
4703 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004704 {
narpra015cdda352018-11-19 15:30:27 +00004705 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4706 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4707 {
4708 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4709 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4710 }
telsoa014fcda012018-03-09 14:13:49 +00004711
narpra015cdda352018-11-19 15:30:27 +00004712 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004713
narpra015cdda352018-11-19 15:30:27 +00004714 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4715 for (unsigned int i = 0; i < inputCount; ++i)
4716 {
4717 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4718 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4719 subTensorsSupported ?
4720 workloadFactory.CreateSubTensorHandle(*outputHandle,
4721 inputTensorInfo.GetShape(),
4722 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4723 workloadFactory.CreateTensorHandle(inputTensorInfo);
4724
4725 inputHandles.emplace_back(std::move(inputHandle));
4726 }
4727
telsoa014fcda012018-03-09 14:13:49 +00004728 }
narpra015cdda352018-11-19 15:30:27 +00004729 else
4730 {
4731 for (unsigned int i = 0; i < inputCount; ++i)
4732 {
4733 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4734 inputHandles.emplace_back(std::move(inputHandle));
4735 }
4736 }
telsoa014fcda012018-03-09 14:13:49 +00004737
4738 for (unsigned int i = 0; i < inputCount; ++i)
4739 {
surmeh013537c2c2018-05-18 16:31:43 +01004740 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004741 }
4742
4743 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4744
Jim Flynn4ed6c832019-05-20 11:02:46 +01004745 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004746
4747 for (auto& inputHandle : inputHandles)
4748 {
4749 inputHandle->Allocate();
4750 }
4751
4752 outputHandle->Allocate();
4753
4754 unsigned int nextInputId = 0;
4755 for (auto& inputHandle : inputHandles)
4756 {
surmeh013537c2c2018-05-18 16:31:43 +01004757 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4758 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004759 }
4760
Derek Lambertif30f7d32019-04-09 10:25:02 +01004761 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004762 workload->Execute();
4763
surmeh013537c2c2018-05-18 16:31:43 +01004764 if (needPermuteForConcat)
4765 {
4766 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004767 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004768 outputTensorInfo,
4769 permuteVector,
4770 std::move(outputHandle),
4771 output);
4772 }
4773 else
4774 {
4775 CopyDataFromITensorHandle(output, outputHandle.get());
4776 }
telsoa014fcda012018-03-09 14:13:49 +00004777}
4778
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004779template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004780LayerTestResult<T, 1> Concatenation1dTestImpl(
4781 armnn::IWorkloadFactory& workloadFactory,
4782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4783 float qScale,
4784 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004785{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004786 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004787
4788 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4789 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4790 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4791
Jim Flynncbb66aa2019-05-15 13:03:54 +01004792 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004793
4794 LayerTestResult<T, 1> result(outputTensorInfo);
4795
4796 std::vector<T> output;
4797 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004798 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004799 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4800 { input0.data(), input1.data(), input2.data() },
4801 outputTensorInfo,
4802 output.data(),
4803 0,
4804 true);
telsoa014fcda012018-03-09 14:13:49 +00004805
4806 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4807 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4808 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4809 }));
4810
4811 return result;
4812}
4813
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004814LayerTestResult<float, 1> Concatenation1dTest(
4815 armnn::IWorkloadFactory& workloadFactory,
4816 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004817{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004818 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004819}
4820
// Shared helper for the 2D concatenation tests: concatenates three fixed
// 2x3 inputs along 'dimension' into 'outputTensorInfo' and returns the actual
// output. The caller is responsible for filling in result.outputExpected.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
4871
// Concatenates the three 2x3 inputs along dimension 0 (batch), producing a
// 6x3 output: inputs are stacked one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    // Expected: the rows of input0, then input1, then input2, in order.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
4906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004907LayerTestResult<float, 2> Concatenation2dDim0Test(
4908 armnn::IWorkloadFactory& workloadFactory,
4909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004910{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004911 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004912}
4913
// Concatenates the three 2x3 inputs along dimension 1 (per-batch elements),
// producing a 2x9 output: each batch row is the three input rows joined.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
4936
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004937LayerTestResult<float, 2> Concatenation2dDim1Test(
4938 armnn::IWorkloadFactory& workloadFactory,
4939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004940{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004941 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004942}
4943
// Concatenates three inputs of different batch counts (2x3, 3x3, 1x3) along
// dimension 0, producing a 6x3 output.
// NOTE(review): two batch-label comments were wrong in the original (input1's
// third row and input2's only row); corrected below — data is unchanged.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
5014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005015LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5016 armnn::IWorkloadFactory& workloadFactory,
5017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005018{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005019 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5020 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005021}
5022
// Concatenates three inputs of different widths (2x3, 2x5, 2x1) along
// dimension 1, producing a 2x9 output: each batch row is the three input
// rows joined end to end.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
5081
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005082LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5083 armnn::IWorkloadFactory& workloadFactory,
5084 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005085{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005086 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5087 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005088}
5089
// Shared helper for the 3D concatenation tests: concatenates three fixed
// [2,3,2] inputs along 'dimension' into 'outputTensorInfo' and returns the
// actual output. The caller fills in result.outputExpected.
// 'useSubtensor' selects whether the backend may write via sub-tensor views.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
5177
// Concatenates the three [2,3,2] inputs along dimension 0 (batch), producing
// a [6,3,2] output: the inputs are stacked batch-wise one after another.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
5248
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005249LayerTestResult<float, 3> Concatenation3dDim0Test(
5250 armnn::IWorkloadFactory& workloadFactory,
5251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005252{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005253 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005254}
5255
// Concatenates the three [2,3,2] inputs along dimension 1 (channel),
// producing a [2,9,2] output: per batch, the channels of the three inputs
// are joined in order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
5326
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005327LayerTestResult<float, 3> Concatenation3dDim1Test(
5328 armnn::IWorkloadFactory& workloadFactory,
5329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005330{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005331 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005332}
5333
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005334template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005335LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5336 armnn::IWorkloadFactory& workloadFactory,
5337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005338 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005339 float qScale,
5340 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005341{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005342 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005343
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005344 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5345 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005346
telsoa014fcda012018-03-09 14:13:49 +00005347 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5348 // Batch 0, Channel 0
5349 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5350
5351 // Batch 0, Channel 1
5352 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5353
5354 // Batch 0, Channel 2
5355 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5356
5357 // Batch 1, Channel 0
5358 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5359
5360 // Batch 1, Channel 1
5361 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5362
5363 // Batch 1, Channel 2
5364 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5365 }));
5366
5367 return result;
5368}
5369
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005370LayerTestResult<float, 3> Concatenation3dDim2Test(
5371 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5373 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005374{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005375 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5376 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005377}
5378
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005379template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005380LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5381 armnn::IWorkloadFactory& workloadFactory,
5382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5383 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005384 int32_t qOffset)
5385{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005386 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005387 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5388 // Batch 0, Channel 0
5389 1.0f, 2.0f,
5390
5391 // Batch 0, Channel 1
5392 3.0f, 4.0f,
5393
5394 // Batch 0, Channel 2
5395 5.0f, 6.0f,
5396
5397 // Batch 1, Channel 0
5398 19.0f, 20.0f,
5399
5400 // Batch 1, Channel 1
5401 21.0f, 22.0f,
5402
5403 // Batch 1, Channel 2
5404 23.0f, 24.0f
5405 }));
5406
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005407 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005408 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5409 // Batch 0, Channel 0
5410 7.0f, 8.0f,
5411
5412 // Batch 0, Channel 1
5413 9.0f, 10.0f,
5414
5415 // Batch 0, Channel 2
5416 11.0f, 12.0f,
5417 }));
5418
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005419 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005420 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5421 // Batch 0, Channel 0
5422 25.0f, 26.0f,
5423
5424 // Batch 0, Channel 1
5425 27.0f, 28.0f,
5426
5427 // Batch 0, Channel 2
5428 29.0f, 30.0f,
5429
5430 // Batch 1, Channel 0
5431 13.0f, 14.0f,
5432
5433 // Batch 1, Channel 1
5434 15.0f, 16.0f,
5435
5436 // Batch 1, Channel 2
5437 17.0f, 18.0f,
5438
5439 // Batch 2, Channel 0
5440 31.0f, 32.0f,
5441
5442 // Batch 2, Channel 1
5443 33.0f, 34.0f,
5444
5445 // Batch 2, Channel 2
5446 35.0f, 36.0f
5447 }));
5448
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005449 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005450 LayerTestResult<T, 3> result(outputTensorInfo);
5451
5452 std::vector<T> output;
5453 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005454 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005455 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5456 { input0.data(), input1.data(), input2.data() },
5457 outputTensorInfo,
5458 output.data(),
5459 0,
5460 true);
telsoa014fcda012018-03-09 14:13:49 +00005461
5462 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5463 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5464 // Batch 0, Channel 0
5465 1.0f, 2.0f,
5466
5467 // Batch 0, Channel 1
5468 3.0f, 4.0f,
5469
5470 // Batch 0, Channel 2
5471 5.0f, 6.0f,
5472
5473 // Batch 1, Channel 0
5474 19.0f, 20.0f,
5475
5476 // Batch 1, Channel 1
5477 21.0f, 22.0f,
5478
5479 // Batch 1, Channel 2
5480 23.0f, 24.0f,
5481
5482 // Batch 2, Channel 0
5483 7.0f, 8.0f,
5484
5485 // Batch 2, Channel 1
5486 9.0f, 10.0f,
5487
5488 // Batch 2, Channel 2
5489 11.0f, 12.0f,
5490
5491 // Batch 3, Channel 0
5492 25.0f, 26.0f,
5493
5494 // Batch 3, Channel 1
5495 27.0f, 28.0f,
5496
5497 // Batch 3, Channel 2
5498 29.0f, 30.0f,
5499
5500 // Batch 4, Channel 0
5501 13.0f, 14.0f,
5502
5503 // Batch 4, Channel 1
5504 15.0f, 16.0f,
5505
5506 // Batch 4, Channel 2
5507 17.0f, 18.0f,
5508
5509 // Batch 5, Channel 0
5510 31.0f, 32.0f,
5511
5512 // Batch 5, Channel 1
5513 33.0f, 34.0f,
5514
5515 // Batch 5, Channel 2
5516 35.0f, 36.0f
5517 }));
5518
5519 return result;
5520}
5521
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005522LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5523 armnn::IWorkloadFactory& workloadFactory,
5524 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005525{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005526 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5527 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005528}
5529
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005530template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005531LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5532 armnn::IWorkloadFactory& workloadFactory,
5533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5534 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005535 int32_t qOffset)
5536{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005537 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005538 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5539 // Batch 0, Channel 0
5540 1.0f, 2.0f,
5541
5542 // Batch 0, Channel 1
5543 3.0f, 4.0f,
5544
5545 // Batch 0, Channel 2
5546 5.0f, 6.0f,
5547
5548 // Batch 1, Channel 0
5549 19.0f, 20.0f,
5550
5551 // Batch 1, Channel 1
5552 21.0f, 22.0f,
5553
5554 // Batch 1, Channel 2
5555 23.0f, 24.0f
5556 }));
5557
Jim Flynncbb66aa2019-05-15 13:03:54 +01005558 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005559 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5560 // Batch 0, Channel 0
5561 7.0f, 8.0f,
5562
5563 // Batch 0, Channel 1
5564 9.0f, 10.0f,
5565
5566 // Batch 0, Channel 2
5567 11.0f, 12.0f,
5568
5569 // Batch 0, Channel 3
5570 25.0f, 26.0f,
5571
5572 // Batch 1, Channel 0
5573 27.0f, 28.0f,
5574
5575 // Batch 1, Channel 1
5576 29.0f, 30.0f,
5577
5578 // Batch 1, Channel 2
5579 13.0f, 14.0f,
5580
5581 // Batch 1, Channel 3
5582 15.0f, 16.0f,
5583 }));
5584
Jim Flynncbb66aa2019-05-15 13:03:54 +01005585 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005586 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5587 // Batch 0, Channel 0
5588 17.0f, 18.0f,
5589
5590 // Batch 1, Channel 0
5591 31.0f, 32.0f,
5592 }));
5593
Jim Flynncbb66aa2019-05-15 13:03:54 +01005594 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005595 LayerTestResult<T, 3> result(outputTensorInfo);
5596
5597 std::vector<T> output;
5598 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005599 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005600 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5601 { input0.data(), input1.data(), input2.data() },
5602 outputTensorInfo,
5603 output.data(),
5604 1,
5605 true);
telsoa014fcda012018-03-09 14:13:49 +00005606
5607 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5608 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5609 // Batch 0, Channel 0
5610 1.0f, 2.0f,
5611
5612 // Batch 0, Channel 1
5613 3.0f, 4.0f,
5614
5615 // Batch 0, Channel 2
5616 5.0f, 6.0f,
5617
5618 // Batch 0, Channel 3
5619 7.0f, 8.0f,
5620
5621 // Batch 0, Channel 4
5622 9.0f, 10.0f,
5623
5624 // Batch 0, Channel 5
5625 11.0f, 12.0f,
5626
5627 // Batch 0, Channel 6
5628 25.0f, 26.0f,
5629
5630 // Batch 0, Channel 7
5631 17.0f, 18.0f,
5632
5633 // Batch 1, Channel 0
5634 19.0f, 20.0f,
5635
5636 // Batch 1, Channel 1
5637 21.0f, 22.0f,
5638
5639 // Batch 1, Channel 2
5640 23.0f, 24.0f,
5641
5642 // Batch 1, Channel 3
5643 27.0f, 28.0f,
5644
5645 // Batch 1, Channel 4
5646 29.0f, 30.0f,
5647
5648 // Batch 1, Channel 5
5649 13.0f, 14.0f,
5650
5651 // Batch 1, Channel 6
5652 15.0f, 16.0f,
5653
5654 // Batch 1, Channel 7
5655 31.0f, 32.0f,
5656 }));
5657
5658 return result;
5659}
5660
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005661LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5662 armnn::IWorkloadFactory& workloadFactory,
5663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005664{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005665 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5666 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005667}
5668
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005669template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005670LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
5671 armnn::IWorkloadFactory& workloadFactory,
5672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005673 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005674 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005675 int32_t qOffset)
5676{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005677 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005678 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5679 // Batch 0, Channel 0
5680 1.0f, 2.0f,
5681
5682 // Batch 0, Channel 1
5683 3.0f, 4.0f,
5684
5685 // Batch 0, Channel 2
5686 5.0f, 6.0f,
5687
5688 // Batch 1, Channel 0
5689 19.0f, 20.0f,
5690
5691 // Batch 1, Channel 1
5692 21.0f, 22.0f,
5693
5694 // Batch 1, Channel 2
5695 23.0f, 24.0f
5696 }));
5697
Jim Flynncbb66aa2019-05-15 13:03:54 +01005698 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005699 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5700 // Batch 0, Channel 0
5701 7.0f,
5702
5703 // Batch 0, Channel 1
5704 9.0f,
5705
5706 // Batch 0, Channel 2
5707 11.0f,
5708
5709 // Batch 1, Channel 0
5710 25.0f,
5711
5712 // Batch 1, Channel 1
5713 27.0f,
5714
5715 // Batch 1, Channel 2
5716 29.0f
5717 }));
5718
Jim Flynncbb66aa2019-05-15 13:03:54 +01005719 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005720 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5721 // Batch 0, Channel 0
5722 13.0f, 14.0f, 50.0f,
5723
5724 // Batch 0, Channel 1
5725 15.0f, 16.0f, 51.0f,
5726
5727 // Batch 0, Channel 2
5728 17.0f, 18.0f, 52.0f,
5729
5730 // Batch 1, Channel 0
5731 31.0f, 32.0f, 53.0f,
5732
5733 // Batch 1, Channel 1
5734 33.0f, 34.0f, 54.0f,
5735
5736 // Batch 1, Channel 2
5737 35.0f, 36.0f, 55.0f,
5738 }));
5739
Jim Flynncbb66aa2019-05-15 13:03:54 +01005740 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005741 LayerTestResult<T, 3> result(outputTensorInfo);
5742
5743 std::vector<T> output;
5744 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005745 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005746 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5747 { input0.data(), input1.data(), input2.data() },
5748 outputTensorInfo,
5749 output.data(),
5750 2,
5751 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005752
5753 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5754 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5755 // Batch 0, Channel 0
5756 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
5757
5758 // Batch 0, Channel 1
5759 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
5760
5761 // Batch 0, Channel 2
5762 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
5763
5764 // Batch 1, Channel 0
5765 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
5766
5767 // Batch 1, Channel 1
5768 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
5769
5770 // Batch 1, Channel 2
5771 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
5772 }));
5773
5774 return result;
5775}
5776
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005777LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5778 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005779 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5780 bool useSubtensor)
5781{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005782 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5783 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005784}
5785
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005786template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005787LayerTestResult<T, 4> Concatenation4dTestImpl(
5788 armnn::IWorkloadFactory& workloadFactory,
5789 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5790 const armnn::TensorInfo& outputTensorInfo,
5791 unsigned int dimension,
5792 bool useSubtensor,
5793 float qScale,
5794 int32_t qOffset)
5795{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005796 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005797
5798 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5799 1.0f, 2.0f,
5800 3.0f, 4.0f,
5801 5.0f, 6.0f,
5802 7.0f, 8.0f,
5803 9.0f, 10.0f,
5804 11.0f, 12.0f
5805 }));
5806
5807 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5808 11.0f, 12.0f,
5809 13.0f, 14.0f,
5810 15.0f, 16.0f,
5811 17.0f, 18.0f,
5812 19.0f, 20.0f,
5813 21.0f, 22.0f
5814 }));
5815
5816 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5817 21.0f, 22.0f,
5818 23.0f, 24.0f,
5819 25.0f, 26.0f,
5820 27.0f, 28.0f,
5821 29.0f, 30.0f,
5822 31.0f, 32.0f
5823 }));
5824
5825 LayerTestResult<T, 4> result(outputTensorInfo);
5826
5827 std::vector<T> output;
5828 output.resize(outputTensorInfo.GetNumElements());
5829
5830 Concatenate<T>(workloadFactory,
5831 memoryManager,
5832 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5833 {input0.data(), input1.data(), input2.data()},
5834 outputTensorInfo,
5835 output.data(),
5836 dimension,
5837 useSubtensor);
5838
5839 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5840 return result;
5841}
5842
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005843template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005844LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
5845 armnn::IWorkloadFactory& workloadFactory,
5846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5847 float qScale,
5848 int32_t qOffset)
5849{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005850 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005851
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005852 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5853 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5854
narpra015cdda352018-11-19 15:30:27 +00005855 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5856 1.0f, 2.0f,
5857 3.0f, 4.0f,
5858 5.0f, 6.0f,
5859 7.0f, 8.0f,
5860 9.0f, 10.0f,
5861 11.0f, 12.0f,
5862
5863 11.0f, 12.0f,
5864 13.0f, 14.0f,
5865 15.0f, 16.0f,
5866 17.0f, 18.0f,
5867 19.0f, 20.0f,
5868 21.0f, 22.0f,
5869
5870 21.0f, 22.0f,
5871 23.0f, 24.0f,
5872 25.0f, 26.0f,
5873 27.0f, 28.0f,
5874 29.0f, 30.0f,
5875 31.0f, 32.0f
5876 }));
5877 return result;
5878}
5879
5880LayerTestResult<float, 4> Concatenation4dDim0Test(
5881 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005883{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005884 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005885}
5886
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005887template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005888LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
5889 armnn::IWorkloadFactory& workloadFactory,
5890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5891 float qScale,
5892 int32_t qOffset)
5893{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005894 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005895
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005896 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5897 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
5898
narpra015cdda352018-11-19 15:30:27 +00005899 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5900 1.0f, 2.0f,
5901 3.0f, 4.0f,
5902 5.0f, 6.0f,
5903 7.0f, 8.0f,
5904 9.0f, 10.0f,
5905 11.0f, 12.0f,
5906
5907 11.0f, 12.0f,
5908 13.0f, 14.0f,
5909 15.0f, 16.0f,
5910 17.0f, 18.0f,
5911 19.0f, 20.0f,
5912 21.0f, 22.0f,
5913
5914 21.0f, 22.0f,
5915 23.0f, 24.0f,
5916 25.0f, 26.0f,
5917 27.0f, 28.0f,
5918 29.0f, 30.0f,
5919 31.0f, 32.0f
5920 }));
5921
5922 return result;
5923}
5924
5925LayerTestResult<float, 4> Concatenation4dDim1Test(
5926 armnn::IWorkloadFactory& workloadFactory,
5927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5928{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005929 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005930}
5931
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005932template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005933LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
5934 armnn::IWorkloadFactory& workloadFactory,
5935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5936 float qScale,
5937 int32_t qOffset)
5938{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005939 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005940
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005941 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5942 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
5943
narpra015cdda352018-11-19 15:30:27 +00005944 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5945 1.0f, 2.0f,
5946 3.0f, 4.0f,
5947 11.0f, 12.0f,
5948 13.0f, 14.0f,
5949 21.0f, 22.0f,
5950 23.0f, 24.0f,
5951
5952 5.0f, 6.0f,
5953 7.0f, 8.0f,
5954 15.0f, 16.0f,
5955 17.0f, 18.0f,
5956 25.0f, 26.0f,
5957 27.0f, 28.0f,
5958
5959 9.0f, 10.0f,
5960 11.0f, 12.0f,
5961 19.0f, 20.0f,
5962 21.0f, 22.0f,
5963 29.0f, 30.0f,
5964 31.0f, 32.0f
5965 }));
5966
5967 return result;
5968}
5969
5970LayerTestResult<float, 4> Concatenation4dDim2Test(
5971 armnn::IWorkloadFactory& workloadFactory,
5972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5973{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005974 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005975}
5976
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005977template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005978LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
5979 armnn::IWorkloadFactory& workloadFactory,
5980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5981 float qScale,
5982 int32_t qOffset,
5983 bool useSubtensor)
5984{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005985 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005986
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005987 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
5988 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
5989
narpra015cdda352018-11-19 15:30:27 +00005990 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5991 1.0f, 2.0f,
5992 11.0f, 12.0f,
5993 21.0f, 22.0f,
5994 3.0f, 4.0f,
5995 13.0f, 14.0f,
5996 23.0f, 24.0f,
5997
5998 5.0f, 6.0f,
5999 15.0f, 16.0f,
6000 25.0f, 26.0f,
6001 7.0f, 8.0f,
6002 17.0f, 18.0f,
6003 27.0f, 28.0f,
6004
6005 9.0f, 10.0f,
6006 19.0f, 20.0f,
6007 29.0f, 30.0f,
6008 11.0f, 12.0f,
6009 21.0f, 22.0f,
6010 31.0f, 32.0f
6011 }));
6012
6013 return result;
6014}
6015
6016LayerTestResult<float, 4> Concatenation4dDim3Test(
6017 armnn::IWorkloadFactory& workloadFactory,
6018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6019 bool useSubtensor)
6020{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006021 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6022 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00006023}
6024
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006025template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006026LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
6027 armnn::IWorkloadFactory& workloadFactory,
6028 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6029 float qScale,
6030 int32_t qOffset)
6031{
6032 unsigned int dimension = 0;
Jim Flynncbb66aa2019-05-15 13:03:54 +01006033 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006034
6035 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6036 1.0f, 2.0f,
6037 3.0f, 4.0f,
6038 5.0f, 6.0f,
6039 7.0f, 8.0f,
6040 9.0f, 10.0f,
6041 11.0f, 12.0f
6042 }));
6043
Jim Flynncbb66aa2019-05-15 13:03:54 +01006044 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006045
6046 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6047 11.0f, 12.0f,
6048 13.0f, 14.0f,
6049 15.0f, 16.0f,
6050 17.0f, 18.0f,
6051 19.0f, 20.0f,
6052 21.0f, 22.0f,
6053
6054 21.0f, 22.0f,
6055 23.0f, 24.0f,
6056 25.0f, 26.0f,
6057 27.0f, 28.0f,
6058 29.0f, 30.0f,
6059 31.0f, 32.0f
6060
6061 }));
6062
Jim Flynncbb66aa2019-05-15 13:03:54 +01006063 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006064
6065 LayerTestResult<T, 4> result(outputTensorInfo);
6066
6067 std::vector<T> output;
6068 output.resize(outputTensorInfo.GetNumElements());
6069 Concatenate<T>(workloadFactory,
6070 memoryManager,
6071 {inputTensorInfo0, inputTensorInfo1},
6072 {input0.data(), input1.data()},
6073 outputTensorInfo,
6074 output.data(),
6075 dimension,
6076 true);
6077
6078 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6079 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6080 1.0f, 2.0f,
6081 3.0f, 4.0f,
6082 5.0f, 6.0f,
6083 7.0f, 8.0f,
6084 9.0f, 10.0f,
6085 11.0f, 12.0f,
6086
6087 11.0f, 12.0f,
6088 13.0f, 14.0f,
6089 15.0f, 16.0f,
6090 17.0f, 18.0f,
6091 19.0f, 20.0f,
6092 21.0f, 22.0f,
6093
6094 21.0f, 22.0f,
6095 23.0f, 24.0f,
6096 25.0f, 26.0f,
6097 27.0f, 28.0f,
6098 29.0f, 30.0f,
6099 31.0f, 32.0f
6100 }));
6101
6102 return result;
6103}
6104
6105LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6106 armnn::IWorkloadFactory& workloadFactory,
6107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6108{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006109 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6110 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006111}
6112
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006113template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006114LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
6115 armnn::IWorkloadFactory& workloadFactory,
6116 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6117 float qScale,
6118 int32_t qOffset)
6119{
6120 unsigned int dimension = 1;
Jim Flynncbb66aa2019-05-15 13:03:54 +01006121 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006122
6123 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6124 1.0f, 2.0f,
6125 3.0f, 4.0f,
6126 5.0f, 6.0f,
6127 7.0f, 8.0f,
6128 9.0f, 10.0f,
6129 11.0f, 12.0f
6130 }));
6131
Jim Flynncbb66aa2019-05-15 13:03:54 +01006132 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006133
6134 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6135 11.0f, 12.0f,
6136 13.0f, 14.0f,
6137 15.0f, 16.0f,
6138 17.0f, 18.0f,
6139
6140 }));
6141
Jim Flynncbb66aa2019-05-15 13:03:54 +01006142 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006143
6144 LayerTestResult<T, 4> result(outputTensorInfo);
6145
6146 std::vector<T> output;
6147 output.resize(outputTensorInfo.GetNumElements());
6148 Concatenate<T>(workloadFactory,
6149 memoryManager,
6150 {inputTensorInfo0, inputTensorInfo1},
6151 {input0.data(), input1.data()},
6152 outputTensorInfo,
6153 output.data(),
6154 dimension,
6155 true);
6156
6157 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6158 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6159 1.0f, 2.0f,
6160 3.0f, 4.0f,
6161 5.0f, 6.0f,
6162 7.0f, 8.0f,
6163 9.0f, 10.0f,
6164 11.0f, 12.0f,
6165 11.0f, 12.0f,
6166 13.0f, 14.0f,
6167 15.0f, 16.0f,
6168 17.0f, 18.0f
6169 }));
6170
6171 return result;
6172}
6173
6174LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6175 armnn::IWorkloadFactory& workloadFactory,
6176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6177{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006178 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6179 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006180}
6181
// Concatenates two 4D tensors of different sizes along dimension 2:
// { 1, 3, 2, 2 } + { 1, 3, 3, 2 } -> { 1, 3, 5, 2 }. The expected output is
// each input's dimension-2 slices stacked per slice of the outer dimensions.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,   // quantization scale applied to inputs and output
    int32_t qOffset) // quantization offset applied to inputs and output
{
    // Concatenation axis within the 4D shape.
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Final argument matches the 'useSubtensor' flag taken by the Dim3
    // variant below; here it is always enabled.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Per outer slice: two rows from input0 followed by three rows from input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
6253
6254LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6255 armnn::IWorkloadFactory& workloadFactory,
6256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6257{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006258 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6259 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006260}
6261
// Concatenates two 4D tensors of different sizes along dimension 3 (the
// innermost axis): { 1, 3, 2, 2 } + { 1, 3, 2, 3 } -> { 1, 3, 2, 5 }.
// 'useSubtensor' selects whether the backend may implement the concat with
// sub-tensor views instead of a copy.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,      // quantization scale applied to inputs and output
    int32_t qOffset,   // quantization offset applied to inputs and output
    bool useSubtensor) // forwarded to Concatenate<T>
{
    // Concatenation axis within the 4D shape.
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Each output row is input0's two values followed by input1's three.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
6322
6323LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6324 armnn::IWorkloadFactory& workloadFactory,
6325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6326 bool useSubtensor)
6327{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006328 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6329 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006330}
6331
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006332LayerTestResult<float, 2> FakeQuantizationTest(
6333 armnn::IWorkloadFactory& workloadFactory,
6334 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006335{
6336 constexpr unsigned int width = 2;
6337 constexpr unsigned int height = 3;
6338
6339 const armnn::TensorInfo tensorInfo({height, width },
6340 armnn::DataType::Float32);
6341 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6342 -10.0f, -5.0f,
6343 0.0f, 5.0f,
6344 10.0f, 10.0f
6345 }));
6346
6347 LayerTestResult<float, 2> ret(tensorInfo);
6348
6349 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6350
6351 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
6352
6353 armnn::FakeQuantizationQueueDescriptor data;
6354 armnn::WorkloadInfo info;
6355
6356 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6357 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6358 float min = -10.f;
6359 float max = 10.f;
6360
6361 data.m_Parameters.m_Min = min;
6362 data.m_Parameters.m_Max = max;
6363
6364 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6365 armnn::FakeQuantizationQueueDescriptor refData = data;
6366 armnn::WorkloadInfo refInfo = info;
6367 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6368
6369 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6370
6371 inputHandle->Allocate();
6372 outputHandle->Allocate();
6373
6374 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6375
Derek Lambertif30f7d32019-04-09 10:25:02 +01006376 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006377 workload->Execute();
6378
6379 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6380
6381 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6382 0.0f, 63.0f,
6383 128.0f, 191.0f,
6384 255.0f, 255.0f
6385 }));
6386 return ret;
6387}
6388
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006389namespace
6390{
// Shared driver for the L2Normalization tests: quantizes the supplied input,
// runs an L2Normalization workload with the given epsilon and data layout,
// and returns both the actual and quantized expected outputs.
//
// 'inputValues' and 'expectedOutputValues' are supplied in NCHW order; when
// 'layout' is NHWC both are permuted before use. Input and output may carry
// different quantization parameters (scale/offset vs outScale/outOffset).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,       // input quantization scale
    int32_t offset,    // input quantization offset
    const std::vector<float>& inputValues,
    float outScale,    // output quantization scale
    int32_t outOffset, // output quantization offset
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) float input into T.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // The expected output is permuted with the same vector as the input.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6463
// Returns the reciprocal of the Euclidean (L2) norm of the given values,
// i.e. 1 / sqrt(sum of squares).
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float element : elements)
    {
        sumOfSquares += element * element;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
6470
6471} // anonymous namespace
6472
// Pads a 3x3 tensor with two elements on every side of both dimensions,
// producing a 7x7 output whose border is filled with 'customPaddingValue'
// (quantized with the same qScale/qOffset as the data).
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
    QuantizedVector<T>(qScale, qOffset,
    {
      // Height (3) x Width (3)
      4, 8, 6,
      7, 4, 4,
      3, 2, 4
    }));

    // Short alias so the expected 7x7 grid below stays readable.
    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues;
    expectedOutputValues = (
    QuantizedVector<T>(qScale, qOffset,
    {
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p,
      p, p, 4, 8, 6, p, p,
      p, p, 7, 4, 4, p, p,
      p, p, 3, 2, 4, p, p,
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p
    }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: two on each side of both dims.
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006545
// Pads a { 2, 2, 2 } tensor to { 3, 5, 6 } with zeros, using per-dimension
// (before, after) padding of (0,1), (2,1) and (2,2) respectively.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // Each input channel appears offset by (2 rows, 2 columns); the third
    // output channel is entirely padding.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006630
// Pads a { 2, 2, 3, 2 } tensor to { 4, 5, 7, 4 } with zeros, using
// per-dimension (before, after) padding of (1,1), (2,1), (3,1) and (1,1).
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        0, 1,
        2, 3,
        4, 5,

        // Batch 0, Channel 1, Height (3) x Width (2)
        6, 7,
        8, 9,
        10, 11,

        // Batch 1, Channel 0, Height (3) x Width (2)
        12, 13,
        14, 15,
        16, 17,

        // Batch 1, Channel 1, Height (3) x Width (2)
        18, 19,
        20, 21,
        22, 23
    }));

    // Output is 4 batches x 5 channels of 7x4 planes. Each input plane lands
    // at batch offset 1, channel offset 2, row offset 3, column offset 1; all
    // other planes are entirely zero padding.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
        0, 2, 3, 0,
        0, 4, 5, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 6, 7, 0,
        0, 8, 9, 0,
        0, 10, 11, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 12, 13, 0,
        0, 14, 15, 0,
        0, 16, 17, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 18, 19, 0,
        0, 20, 21, 0,
        0, 22, 23, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0
    }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6868
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006869LayerTestResult<uint8_t, 2> PadUint82dTest(
6870 armnn::IWorkloadFactory& workloadFactory,
6871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006872{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006873 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006874}
6875
David Monahan34757812019-06-19 11:47:21 +01006876LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6877 armnn::IWorkloadFactory& workloadFactory,
6878 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6879{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006880 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006881}
6882
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006883LayerTestResult<uint8_t, 3> PadUint83dTest(
6884 armnn::IWorkloadFactory& workloadFactory,
6885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006886{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006887 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006888}
6889
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006890LayerTestResult<uint8_t, 4> PadUint84dTest(
6891 armnn::IWorkloadFactory& workloadFactory,
6892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006893{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006894 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006895}
6896
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006897
// Explicit instantiations of the Pad*TestCommon templates for the 16-bit
// symmetric quantized type, so their definitions in this translation unit
// are emitted and can be linked from the QuantisedSymm16 pad tests.
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);
6919
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006920LayerTestResult<float, 2> PadFloat322dTest(
6921 armnn::IWorkloadFactory& workloadFactory,
6922 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006923{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006924 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006925}
6926
David Monahan34757812019-06-19 11:47:21 +01006927LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6928 armnn::IWorkloadFactory& workloadFactory,
6929 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6930{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006931 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006932}
6933
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006934LayerTestResult<float, 3> PadFloat323dTest(
6935 armnn::IWorkloadFactory& workloadFactory,
6936 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006937{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006938 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006939}
6940
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006941LayerTestResult<float, 4> PadFloat324dTest(
6942 armnn::IWorkloadFactory& workloadFactory,
6943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006944{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006945 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006946}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006947
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006948template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006949LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6950 armnn::IWorkloadFactory& workloadFactory,
6951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6952 float scale,
6953 int32_t offset,
6954 float outScale,
6955 int32_t outOffset,
6956 const armnn::DataLayout layout,
6957 float epsilon)
6958{
6959 // Width: 1
6960 // Height: 1
6961 // Channels: 3
6962 // BatchSize: 1
6963 unsigned int numberOfBatches = 1;
6964 unsigned int numberOfChannels = 3;
6965 unsigned int height = 1;
6966 unsigned int width = 1;
6967
6968 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6969 numberOfBatches, numberOfChannels, height, width, layout);
6970
6971 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6972 std::vector<float> inputValues
6973 {
6974 // Batch 0, Channel 0, Height (1) x Width (1)
6975 0.00000001f,
6976
6977 // Batch 0, Channel 1, Height (1) x Width (1)
6978 0.00000002f,
6979
6980 // Batch 0, Channel 2, Height (1) x Width (1)
6981 0.00000003f,
6982 };
6983
6984 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6985 std::vector<float> expectedOutputValues
6986 {
6987 // Batch 0, Channel 0, Height (1) x Width (1)
6988 0.00000001f * approxInvL2Norm,
6989 0.00000002f * approxInvL2Norm,
6990 0.00000003f * approxInvL2Norm,
6991 };
6992
6993 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6994 inputValues, outScale, outOffset, expectedOutputValues, layout,
6995 epsilon);
6996}
6997
6998
6999template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007000LayerTestResult<T, 4> L2Normalization1dTestCommon(
7001 armnn::IWorkloadFactory& workloadFactory,
7002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007003 float scale,
7004 int32_t offset,
7005 float outScale,
7006 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007007 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007008{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007009 // Width: 1
7010 // Height: 1
7011 // Channels: 10
7012 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007013 unsigned int numberOfBatches = 1;
7014 unsigned int numberOfChannels = 10;
7015 unsigned int height = 1;
7016 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00007017
jimfly013aab7c32018-11-12 13:32:08 +00007018
Nina Drozdd41b2592018-11-19 13:03:36 +00007019 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007020 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007021 std::vector<float> inputValues
7022 {
7023 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007024 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00007025
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007026 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007027 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00007028
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007029 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007030 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00007031
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007032 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007033 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007034
7035 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007036 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007037
7038 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007039 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007040
7041 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007042 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007043
7044 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007045 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007046
7047 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007048 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007049
7050 // Batch 0, Channel 9, Height (1) x Width (1)
7051 10.0f
7052 };
telsoa014fcda012018-03-09 14:13:49 +00007053 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007054 std::vector<float> expectedOutputValues
7055 {
7056 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007057 1.0f * approxInvL2Norm,
7058 2.0f * approxInvL2Norm,
7059 3.0f * approxInvL2Norm,
7060 4.0f * approxInvL2Norm,
7061 5.0f * approxInvL2Norm,
7062 6.0f * approxInvL2Norm,
7063 7.0f * approxInvL2Norm,
7064 8.0f * approxInvL2Norm,
7065 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00007066 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007067 };
telsoa014fcda012018-03-09 14:13:49 +00007068
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007069
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007070 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7071 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00007072}
7073
Ferran Balaguere52211e2019-06-17 12:23:52 +01007074LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7075 armnn::IWorkloadFactory& workloadFactory,
7076 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7077 const armnn::DataLayout layout)
7078{
7079 // Dummy descriptor to get the default value of epsilon.
7080 armnn::L2NormalizationDescriptor descriptor;
7081
7082 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7083 layout, descriptor.m_Eps);
7084}
7085
7086LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7087 armnn::IWorkloadFactory& workloadFactory,
7088 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7089 const armnn::DataLayout layout)
7090{
7091 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7092 layout, 1e-9f);
7093}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007094
7095LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007096 armnn::IWorkloadFactory& workloadFactory,
7097 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007098 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007099{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007100 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007101}
7102
7103LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7104 armnn::IWorkloadFactory& workloadFactory,
7105 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7106 const armnn::DataLayout layout)
7107{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007108 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007109 layout);
7110}
7111
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007112LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7113 armnn::IWorkloadFactory& workloadFactory,
7114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7115 const armnn::DataLayout layout)
7116{
7117 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7118 1.f/128, 128, layout);
7119}
7120
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007121template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7122LayerTestResult<T, 4> L2Normalization2dTestCommon(
7123 armnn::IWorkloadFactory& workloadFactory,
7124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007125 float scale,
7126 int32_t offset,
7127 float outScale,
7128 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007129 const armnn::DataLayout layout)
7130{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007131 // Width: 5
7132 // Height: 1
7133 // Channels: 2
7134 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007135 unsigned int numberOfBatches = 1;
7136 unsigned int numberOfChannels = 2;
7137 unsigned int height = 1;
7138 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00007139
Nina Drozdd41b2592018-11-19 13:03:36 +00007140 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007141 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007142 std::vector<float> inputValues
7143 {
7144 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00007145 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00007146
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007147 // Batch 0, Channel 1, Height (1) x Width (5)
7148 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7149 };
7150 std::vector<float> expectedOutputValues
7151 {
7152 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007153 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7154 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7155 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7156 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
7157 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007158
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007159 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007160 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7161 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7162 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7163 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007164 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007165 };
telsoa014fcda012018-03-09 14:13:49 +00007166
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007167 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7168 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007169}
telsoa014fcda012018-03-09 14:13:49 +00007170
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007171LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007172 armnn::IWorkloadFactory& workloadFactory,
7173 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007174 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007175{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007176 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7177 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007178}
7179
7180LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7181 armnn::IWorkloadFactory& workloadFactory,
7182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7183 const armnn::DataLayout layout)
7184{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007185 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007186 layout);
7187}
7188
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007189LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7190 armnn::IWorkloadFactory& workloadFactory,
7191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7192 const armnn::DataLayout layout)
7193{
7194 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7195 1.f/128, 128, layout);
7196}
7197
// Checks that the L2 Normalization workload accepts a genuinely rank-2 tensor
// shape ({ 5, 2 }, NHWC). The expected-output table shows normalization runs
// over each pair of values in the last dimension.
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    // Each element divided by the L2 norm of the pair it belongs to.
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    // NOTE(review): quantisation scale/offset are 0/0 here — QuantizedVector
    // presumably passes Float32 data through unchanged; confirm in TensorHelpers.
    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
                                            inputTensorInfo.GetQuantizationScale(),
                                            inputTensorInfo.GetQuantizationOffset(),
                                            inputData));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
                                                 outputTensorInfo.GetQuantizationScale(),
                                                 outputTensorInfo.GetQuantizationOffset(),
                                                 expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload descriptor with an explicit epsilon and data layout.
    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    // Allocate backing memory, upload the input, run, and read back the output.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7262
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007263template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7264LayerTestResult<T, 4> L2Normalization3dTestCommon(
7265 armnn::IWorkloadFactory& workloadFactory,
7266 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007267 float scale,
7268 int32_t offset,
7269 float outScale,
7270 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007271 const armnn::DataLayout layout)
7272{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007273 // Width: 3
7274 // Height: 4
7275 // Channels: 2
7276 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007277 unsigned int numberOfBatches = 1;
7278 unsigned int numberOfChannels = 2;
7279 unsigned int height = 4;
7280 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00007281
Nina Drozdd41b2592018-11-19 13:03:36 +00007282 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007283 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007284 std::vector<float> inputValues
7285 {
7286 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007287 119.0f, 21.0f, 150.0f,
7288 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007289 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00007290 147.0f, 199.0f, 220.0f,
7291
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007292 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007293 110.0f, 140.0f, 73.0f,
7294 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007295 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007296 162.0f, 12.0f, 161.0f
7297 };
7298 std::vector<float> expectedOutputValues
7299 {
7300 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007301 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007302 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007303 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
7304 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007305 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007306 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007307 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007308 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7309 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7310 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
7311 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
7312 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
7313
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007314 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007315 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
7316 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007317 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007318 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
7319 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007320 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
7321 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007322 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
7323 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
7324 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007325 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007326 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
7327 };
telsoa014fcda012018-03-09 14:13:49 +00007328
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007329 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7330 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007331}
telsoa014fcda012018-03-09 14:13:49 +00007332
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007333LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007334 armnn::IWorkloadFactory& workloadFactory,
7335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007336 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007337{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007338 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7339 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007340}
7341
7342LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7343 armnn::IWorkloadFactory& workloadFactory,
7344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7345 const armnn::DataLayout layout)
7346{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007347 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007348 layout);
7349}
7350
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007351LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7352 armnn::IWorkloadFactory& workloadFactory,
7353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7354 const armnn::DataLayout layout)
7355{
7356 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7357 1.f/128, 128, layout);
7358}
7359
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007360template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7361LayerTestResult<T, 4> L2Normalization4dTestCommon(
7362 armnn::IWorkloadFactory& workloadFactory,
7363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007364 float scale,
7365 int32_t offset,
7366 float outScale,
7367 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007368 const armnn::DataLayout layout)
7369{
7370 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007371 // Height: 4
7372 // Channels: 3
7373 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00007374 unsigned int numberOfBatches = 2;
7375 unsigned int numberOfChannels = 3;
7376 unsigned int height = 4;
7377 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00007378
Nina Drozdd41b2592018-11-19 13:03:36 +00007379 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007380 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007381 std::vector<float> inputValues
7382 {
7383 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007384 235.0f, 46.0f, 178.0f,
7385 100.0f, 123.0f, 19.0f,
7386 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007387 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00007388
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007389 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007390 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007391 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00007392 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007393 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00007394
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007395 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007396 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00007397 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007398 12.0f, 209.0f, 200.0f,
7399 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00007400
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007401 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007402 67.0f, 90.0f, 49.0f,
7403 7.0f, 163.0f, 18.0f,
7404 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00007405 247.0f, 59.0f, 189.0f,
7406
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007407 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007408 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007409 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00007410 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007411 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00007412
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007413 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007414 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00007415 115.0f, 116.0f, 238.0f,
7416 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007417 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007418 };
7419 std::vector<float> expectedOutputValues
7420 {
7421 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007422 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007423 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007424 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7425 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
7426 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007427 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007428 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007429 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007430 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007431 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007432 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007433 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007434
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007435 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007436 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007437 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007438 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007439 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007440 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007441 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007442 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
7443 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7444 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007445 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7446 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7447 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007448
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007449 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007450 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007451 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
7452 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7453 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007454 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007455 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007456 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007457 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7458 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007459 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7460 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7461 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007462
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007463 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007464 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7465 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7466 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7467 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007468 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007469 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7470 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007471 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
7472 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7473 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007474 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007475 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
7476
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007477 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007478 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7479 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7480 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007481 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007482 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7483 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7484 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
7485 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007486 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7487 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007488 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007489 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007490
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007491 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007492 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007493 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7494 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7495 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
7496 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7497 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7498 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007499 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007500 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007501 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007502 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007503 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007504 };
telsoa014fcda012018-03-09 14:13:49 +00007505
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007506 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7507 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007508}
7509
7510LayerTestResult<float, 4> L2Normalization4dTest(
7511 armnn::IWorkloadFactory& workloadFactory,
7512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7513 const armnn::DataLayout layout)
7514{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007515 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7516 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007517}
7518
7519LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7520 armnn::IWorkloadFactory& workloadFactory,
7521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7522 const armnn::DataLayout layout)
7523{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007524 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007525 layout);
telsoa014fcda012018-03-09 14:13:49 +00007526}
7527
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007528LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7529 armnn::IWorkloadFactory& workloadFactory,
7530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7531 const armnn::DataLayout layout)
7532{
7533 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7534 1.f/128, 128, layout);
7535}
7536
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007537template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007538LayerTestResult<T, 4> ConstantTestImpl(
7539 armnn::IWorkloadFactory& workloadFactory,
7540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007541 float qScale,
7542 int32_t qOffset)
7543{
7544 constexpr unsigned int inputWidth = 3;
7545 constexpr unsigned int inputHeight = 4;
7546 constexpr unsigned int inputChannels = 3;
7547 constexpr unsigned int inputBatchSize = 2;
7548
7549 constexpr unsigned int outputWidth = inputWidth;
7550 constexpr unsigned int outputHeight = inputHeight;
7551 constexpr unsigned int outputChannels = inputChannels;
7552 constexpr unsigned int outputBatchSize = inputBatchSize;
7553
Nina Drozd58ef2c62019-05-16 12:09:18 +01007554 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7555 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007556
Nina Drozd58ef2c62019-05-16 12:09:18 +01007557 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7558 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007559
7560 // Set quantization parameters if the requested type is a quantized type.
7561 if(armnn::IsQuantizedType<T>())
7562 {
7563 inputTensorInfo.SetQuantizationScale(qScale);
7564 inputTensorInfo.SetQuantizationOffset(qOffset);
7565 outputTensorInfo.SetQuantizationScale(qScale);
7566 outputTensorInfo.SetQuantizationOffset(qOffset);
7567 }
7568
7569 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7570 QuantizedVector<T>(qScale, qOffset, {
7571 // Batch 0, Channel 0
7572 235.0f, 46.0f, 178.0f,
7573 100.0f, 123.0f, 19.0f,
7574 172.0f, 74.0f, 250.0f,
7575 6.0f, 195.0f, 80.0f,
7576
7577 // Batch 0, Channel 1
7578 113.0f, 95.0f, 202.0f,
7579 77.0f, 114.0f, 71.0f,
7580 122.0f, 246.0f, 166.0f,
7581 82.0f, 28.0f, 37.0f,
7582
7583 // Batch 0, Channel 2
7584 56.0f, 170.0f, 162.0f,
7585 194.0f, 89.0f, 254.0f,
7586 12.0f, 209.0f, 200.0f,
7587 1.0f, 64.0f, 54.0f,
7588
7589 // Batch 1, Channel 0
7590 67.0f, 90.0f, 49.0f,
7591 7.0f, 163.0f, 18.0f,
7592 25.0f, 117.0f, 103.0f,
7593 247.0f, 59.0f, 189.0f,
7594
7595 // Batch 1, Channel 1
7596 239.0f, 104.0f, 199.0f,
7597 17.0f, 124.0f, 153.0f,
7598 222.0f, 217.0f, 75.0f,
7599 32.0f, 126.0f, 21.0f,
7600
7601 // Batch 1, Channel 2
7602 97.0f, 145.0f, 215.0f,
7603 115.0f, 116.0f, 238.0f,
7604 226.0f, 16.0f, 132.0f,
7605 92.0f, 125.0f, 88.0f,
7606 })));
7607
7608 LayerTestResult<T, 4> result(outputTensorInfo);
7609 result.outputExpected = input;
7610
7611 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7612
7613 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7614 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7615
7616 armnn::ConstantQueueDescriptor descriptor;
7617 descriptor.m_LayerOutput = &constantTensor;
7618
7619 armnn::WorkloadInfo info;
7620 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7621
7622 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7623
7624 outputHandle->Allocate();
7625
Derek Lambertif30f7d32019-04-09 10:25:02 +01007626 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007627 workload->Execute();
7628
7629 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7630 return result;
7631}
7632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007633LayerTestResult<float, 4> ConstantTest(
7634 armnn::IWorkloadFactory& workloadFactory,
7635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007636{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007637 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007638}
7639
Nina Drozd58ef2c62019-05-16 12:09:18 +01007640LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7641 armnn::IWorkloadFactory& workloadFactory,
7642 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7643{
7644 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7645}
7646
7647LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007648 armnn::IWorkloadFactory& workloadFactory,
7649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007651 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007652}
7653
Jim Flynn4ed6c832019-05-20 11:02:46 +01007654LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007655 armnn::IWorkloadFactory& workloadFactory,
7656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7657{
7658 unsigned int outputWidth = 3;
7659 unsigned int outputHeight = 6;
7660 unsigned int outputChannels = 3;
7661
7662 unsigned int inputWidth1 = 3;
7663 unsigned int inputHeight1 = 6;
7664 unsigned int inputChannels1 = 2;
7665
7666 unsigned int inputWidth2 = 3;
7667 unsigned int inputHeight2 = 6;
7668 unsigned int inputChannels2 = 1;
7669
7670 // Defines the tensor descriptors.
7671 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7672 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7673 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7674
7675 // Quantized input1 tensor. Range [-3, 1]
7676 const float inputScale1 = 0.015686f;
7677 const int32_t inputOffset1 = 192;
7678
7679 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7680 {
7681 1, 2, 3,
7682 4, 5, 6,
7683 7, 8, 9,
7684 10, 11, 12,
7685 13, 14, 15,
7686 16, 17, 18,
7687
7688 19, 20, 21,
7689 22, 23, 24,
7690 25, 26, 27,
7691 28, 29, 30,
7692 31, 32, 33,
7693 34, 35, 36,
7694 })
7695 );
7696
7697 // Quatized input2 tensor. Range [-1, 4]
7698 const float inputScale2 = 0.019608f;
7699 const int32_t inputOffset2 = 50;
7700
7701 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7702 {
7703 37, 38, 39,
7704 40, 41, 42,
7705 43, 44, 45,
7706 46, 47, 48,
7707 49, 50, 51,
7708 52, 53, 54,
7709 })
7710 );
7711
7712 // Output has the same quantization parameters than input1,
7713 // so that only the requantization of input2 is required
7714 const float outputScale = 0.015686f;
7715 const int32_t outputOffset = 192;
7716
7717 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7718
7719 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7720 {
7721 1, 2, 3,
7722 4, 5, 6,
7723 7, 8, 9,
7724 10, 11, 12,
7725 13, 14, 15,
7726 16, 17, 18,
7727
7728 19, 20, 21,
7729 22, 23, 24,
7730 25, 26, 27,
7731 28, 29, 30,
7732 31, 32, 33,
7733 34, 35, 36,
7734
7735 176, 177, 178,
7736 179, 181, 182,
7737 183, 184, 186,
7738 187, 188, 189,
7739 191, 192, 193,
7740 195, 196, 197,
7741 })
7742 );
7743
7744 outputTensorInfo.SetQuantizationScale(outputScale);
7745 outputTensorInfo.SetQuantizationOffset(outputOffset);
7746 inputTensorInfo1.SetQuantizationScale(inputScale1);
7747 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7748 inputTensorInfo2.SetQuantizationScale(inputScale2);
7749 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7750
7751 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007752 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007753
7754 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007755 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007756
7757 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7758
7759 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7760
7761 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7762 subTensorsSupported ?
7763 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7764 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7765
7766 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7767 subTensorsSupported ?
7768 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7769 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7770
Jim Flynne242f2d2019-05-22 14:24:13 +01007771 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007772 armnn::WorkloadInfo info;
7773 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7774 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7775 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7776
7777 data.m_ViewOrigins.push_back(window1);
7778 data.m_ViewOrigins.push_back(window2);
7779
Jim Flynn4ed6c832019-05-20 11:02:46 +01007780 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007781
7782 inputHandle1->Allocate();
7783 inputHandle2->Allocate();
7784 outputHandle->Allocate();
7785
7786 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7787 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7788
Derek Lambertif30f7d32019-04-09 10:25:02 +01007789 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007790 workload->Execute();
7791
7792 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7793
7794 return ret;
7795}
7796
Jim Flynn4ed6c832019-05-20 11:02:46 +01007797LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007798 armnn::IWorkloadFactory& workloadFactory,
7799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007800{
surmeh013537c2c2018-05-18 16:31:43 +01007801 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007802 unsigned int outputHeight = 6;
7803 unsigned int outputChannels = 3;
7804
surmeh013537c2c2018-05-18 16:31:43 +01007805 unsigned int inputWidth1 = 3;
7806 unsigned int inputHeight1 = 6;
7807 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007808
surmeh013537c2c2018-05-18 16:31:43 +01007809 unsigned int inputWidth2 = 3;
7810 unsigned int inputHeight2 = 6;
7811 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007812
telsoa01c577f2c2018-08-31 09:22:23 +01007813 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007814 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7815 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7816 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007817
Jim Flynn4ed6c832019-05-20 11:02:46 +01007818 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007819 const float scale = 0.13497836f;
7820 const int32_t offset = -7;
7821
7822 outputTensorInfo.SetQuantizationScale(scale);
7823 outputTensorInfo.SetQuantizationOffset(offset);
7824 inputTensorInfo1.SetQuantizationScale(scale);
7825 inputTensorInfo1.SetQuantizationOffset(offset);
7826 inputTensorInfo2.SetQuantizationScale(scale);
7827 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007828
7829 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7830
7831 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007832 {
7833 1, 2, 3,
7834 4, 5, 6,
7835 7, 8, 9,
7836 10, 11, 12,
7837 13, 14, 15,
7838 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007839
surmeh013537c2c2018-05-18 16:31:43 +01007840 19, 20, 21,
7841 22, 23, 24,
7842 25, 26, 27,
7843 28, 29, 30,
7844 31, 32, 33,
7845 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007846
surmeh013537c2c2018-05-18 16:31:43 +01007847 37, 38, 39,
7848 40, 41, 42,
7849 43, 44, 45,
7850 46, 47, 48,
7851 49, 50, 51,
7852 52, 53, 54,
7853 })
telsoa014fcda012018-03-09 14:13:49 +00007854 );
7855
telsoa014fcda012018-03-09 14:13:49 +00007856 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7857 {
surmeh013537c2c2018-05-18 16:31:43 +01007858 1, 2, 3,
7859 4, 5, 6,
7860 7, 8, 9,
7861 10, 11, 12,
7862 13, 14, 15,
7863 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007864
surmeh013537c2c2018-05-18 16:31:43 +01007865 19, 20, 21,
7866 22, 23, 24,
7867 25, 26, 27,
7868 28, 29, 30,
7869 31, 32, 33,
7870 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007871 })
7872 );
7873
7874 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7875 {
surmeh013537c2c2018-05-18 16:31:43 +01007876 37, 38, 39,
7877 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00007878 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01007879 46, 47, 48,
7880 49, 50, 51,
7881 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00007882 })
7883 );
7884
telsoa01c577f2c2018-08-31 09:22:23 +01007885 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007886 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00007887
telsoa01c577f2c2018-08-31 09:22:23 +01007888 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007889 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00007890
telsoa014fcda012018-03-09 14:13:49 +00007891
7892 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7893
7894 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7895
7896 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7897 subTensorsSupported ?
7898 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7899 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7900
7901 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7902 subTensorsSupported ?
7903 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7904 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7905
telsoa014fcda012018-03-09 14:13:49 +00007906
Jim Flynne242f2d2019-05-22 14:24:13 +01007907 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00007908 armnn::WorkloadInfo info;
7909 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7910 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00007911 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7912
7913 data.m_ViewOrigins.push_back(window1);
7914 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00007915
Jim Flynn4ed6c832019-05-20 11:02:46 +01007916 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00007917
7918 inputHandle1->Allocate();
7919 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007920 outputHandle->Allocate();
7921
7922 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7923 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007924
Derek Lambertif30f7d32019-04-09 10:25:02 +01007925 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007926 workload->Execute();
7927
7928 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7929
7930 return ret;
7931}
7932
Jim Flynn4ed6c832019-05-20 11:02:46 +01007933LayerTestResult<uint16_t, 3> ConcatUint16Test(
Jim Flynncbb66aa2019-05-15 13:03:54 +01007934 armnn::IWorkloadFactory& workloadFactory,
7935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7936{
7937 unsigned int outputWidth = 3;
7938 unsigned int outputHeight = 6;
7939 unsigned int outputChannels = 3;
7940
7941 unsigned int inputWidth1 = 3;
7942 unsigned int inputHeight1 = 6;
7943 unsigned int inputChannels1 = 2;
7944
7945 unsigned int inputWidth2 = 3;
7946 unsigned int inputHeight2 = 6;
7947 unsigned int inputChannels2 = 1;
7948
7949 // Defines the tensor descriptors.
7950 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7951 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7952 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7953
Jim Flynn4ed6c832019-05-20 11:02:46 +01007954 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
Jim Flynncbb66aa2019-05-15 13:03:54 +01007955 const float scale = 0.13497836f;
7956 const int32_t offset = -7;
7957
7958 outputTensorInfo.SetQuantizationScale(scale);
7959 outputTensorInfo.SetQuantizationOffset(offset);
7960 inputTensorInfo1.SetQuantizationScale(scale);
7961 inputTensorInfo1.SetQuantizationOffset(offset);
7962 inputTensorInfo2.SetQuantizationScale(scale);
7963 inputTensorInfo2.SetQuantizationOffset(offset);
7964
7965 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7966
7967 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7968 {
7969 1, 2, 3,
7970 4, 5, 6,
7971 7, 8, 9,
7972 10, 11, 12,
7973 13, 14, 15,
7974 16, 17, 18,
7975
7976 19, 20, 21,
7977 22, 23, 24,
7978 25, 26, 27,
7979 28, 29, 30,
7980 31, 32, 33,
7981 34, 35, 36,
7982
7983 37, 38, 39,
7984 40, 41, 42,
7985 43, 44, 45,
7986 46, 47, 48,
7987 49, 50, 51,
7988 52, 53, 54,
7989 }));
7990
7991 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7992 {
7993 1, 2, 3,
7994 4, 5, 6,
7995 7, 8, 9,
7996 10, 11, 12,
7997 13, 14, 15,
7998 16, 17, 18,
7999
8000 19, 20, 21,
8001 22, 23, 24,
8002 25, 26, 27,
8003 28, 29, 30,
8004 31, 32, 33,
8005 34, 35, 36,
8006 }));
8007
8008 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
8009 {
8010 37, 38, 39,
8011 40, 41, 42,
8012 43, 44, 45,
8013 46, 47, 48,
8014 49, 50, 51,
8015 52, 53, 54,
8016 }));
8017
8018 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01008019 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008020
8021 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01008022 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008023
8024
8025 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8026
8027 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8028
8029 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8030 subTensorsSupported ?
8031 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8032 workloadFactory.CreateTensorHandle(inputTensorInfo1);
8033
8034 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8035 subTensorsSupported ?
8036 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8037 workloadFactory.CreateTensorHandle(inputTensorInfo2);
8038
8039
Jim Flynne242f2d2019-05-22 14:24:13 +01008040 armnn::ConcatQueueDescriptor data;
Jim Flynncbb66aa2019-05-15 13:03:54 +01008041 armnn::WorkloadInfo info;
8042 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8043 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8044 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8045
8046 data.m_ViewOrigins.push_back(window1);
8047 data.m_ViewOrigins.push_back(window2);
8048
Jim Flynn4ed6c832019-05-20 11:02:46 +01008049 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008050
8051 inputHandle1->Allocate();
8052 inputHandle2->Allocate();
8053 outputHandle->Allocate();
8054
8055 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8056 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8057
8058 workload->PostAllocationConfigure();
8059 workload->Execute();
8060
8061 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8062
8063 return ret;
8064}
telsoa014fcda012018-03-09 14:13:49 +00008065
surmeh01bceff2f2018-03-29 16:29:27 +01008066namespace
telsoa014fcda012018-03-09 14:13:49 +00008067{
Sadik Armagan2999a022019-04-09 14:20:12 +01008068template <typename T>
8069LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008070 armnn::IWorkloadFactory& workloadFactory,
8071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8072 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008073 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008074 float scale0,
8075 int32_t offset0,
8076 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008077 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008078 float scale1,
8079 int32_t offset1,
8080 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008081 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008082 float outScale,
8083 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01008084{
Sadik Armagan2999a022019-04-09 14:20:12 +01008085 auto dataType = (std::is_same<T, uint8_t>::value ?
8086 armnn::DataType::QuantisedAsymm8 :
8087 armnn::DataType::QuantisedSymm16);
8088
8089 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8090 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8091 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00008092
surmeh01bceff2f2018-03-29 16:29:27 +01008093 inputTensorInfo0.SetQuantizationScale(scale0);
8094 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00008095
surmeh01bceff2f2018-03-29 16:29:27 +01008096 inputTensorInfo1.SetQuantizationScale(scale1);
8097 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00008098
surmeh01bceff2f2018-03-29 16:29:27 +01008099 outputTensorInfo.SetQuantizationScale(outScale);
8100 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00008101
Sadik Armagan2999a022019-04-09 14:20:12 +01008102 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8103 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00008104
Sadik Armagan2999a022019-04-09 14:20:12 +01008105 LayerTestResult<T, 4> result(outputTensorInfo);
8106 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8107
8108 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8109 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8110 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8111
8112 armnn::AdditionQueueDescriptor data;
8113 armnn::WorkloadInfo info;
8114 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
8115 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8116 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8117
8118 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8119
8120 inputHandle0->Allocate();
8121 inputHandle1->Allocate();
8122 outputHandle->Allocate();
8123
8124 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8125 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8126
Derek Lambertif30f7d32019-04-09 10:25:02 +01008127 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01008128 workload->Execute();
8129
8130 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8131
8132 return result;
8133}
8134} // anonymous namespace
8135
// Quantized addition with QuantisedAsymm8 data. All tensors use scale 7.0 and
// offset 3, so each quantized value q represents (q - 3) * 7; the trailing
// comments list those dequantized values. Sums whose quantized result would
// exceed 255 saturate, marked "(clamped)" below.
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91 // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    // Expected quantized sums (dequantized values in the comments).
    std::vector<uint8_t> output(
    {
        81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 3,
                                      shape1, input1, 7.0f, 3,
                                      shape0, output, 7.0f, 3);
}
8167
// Quantized addition with QuantisedSymm16 data. All tensors use scale 7.0 and
// offset 0, so each quantized value q represents q * 7; the trailing comments
// list those dequantized values. Unlike the uint8 variant, every result here
// fits comfortably in int16, so no saturation occurs.
// (Review: the previous comments were stale copies from the uint8 test —
// they assumed offset 3 and uint8 clamping; corrected for offset 0 below.)
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392,  784
        203,  28, 252, 168, 245,  91 // 1421, 196, 1764, 1176, 1715,  637
    });

    std::vector<int16_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
        126, 161,  63,  21, 105, 126 // 882, 1127,  441,  147,  735,  882
    });

    // Expected quantized sums (dequantized values in the comments).
    std::vector<int16_t> output(
    {
        84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
8199
namespace
{
// Shared driver for the quantized Multiplication tests: attaches the given
// quantization parameters to each tensor, creates and executes a
// Multiplication workload, and returns the raw quantized output alongside the
// expected outValues for comparison by the caller's framework.
// ArmnnType selects the data type under test; T is the matching storage type.
// NOTE(review): memoryManager is unused here — presumably kept so all layer
// tests share a uniform signature.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008266LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8267 armnn::IWorkloadFactory& workloadFactory,
8268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008269{
8270 unsigned int batchSize = 1;
8271 unsigned int channels = 2;
8272 unsigned int height = 2;
8273 unsigned int width = 3;
8274 const unsigned int shape[] = { batchSize, channels, height, width };
8275
telsoa01c577f2c2018-08-31 09:22:23 +01008276 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008277 std::vector<uint8_t> input0({
8278 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
8279 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
8280 });
8281
telsoa01c577f2c2018-08-31 09:22:23 +01008282 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008283 std::vector<uint8_t> input1({
8284 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8285 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
8286 });
8287
telsoa01c577f2c2018-08-31 09:22:23 +01008288 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008289 std::vector<uint8_t> output(
8290 {
8291 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8292 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
8293 });
8294
Sadik Armagan2999a022019-04-09 14:20:12 +01008295 // Scale/offset chosen to have output values out of range.
8296 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8297 memoryManager,
8298 shape,
8299 input0,
8300 4.0f,
8301 1,
8302 shape,
8303 input1,
8304 3.0f,
8305 -2,
8306 shape,
8307 output,
8308 1366.255f,
8309 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01008310}
8311
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008312LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8313 armnn::IWorkloadFactory& workloadFactory,
8314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008315{
8316 const unsigned int shape0[] = { 1, 2, 2, 3 };
8317 const unsigned int shape1[] = { 1, 1, 1, 1 };
8318
8319 std::vector<uint8_t> input0({
8320 1, 2, 3, 4, 5, 6,
8321 7, 8, 9, 10, 11, 12
8322 });
8323
8324 std::vector<uint8_t> input1({2});
8325
8326 std::vector<uint8_t> output({
8327 2, 4, 6, 8, 10, 12,
8328 14, 16, 18, 20, 22, 24
8329 });
8330
Sadik Armagan2999a022019-04-09 14:20:12 +01008331 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8332 memoryManager,
8333 shape0,
8334 input0,
8335 1.0f,
8336 0,
8337 shape1,
8338 input1,
8339 1.0f,
8340 0,
8341 shape0,
8342 output,
8343 1.0f,
8344 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008345}
8346
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008347LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8348 armnn::IWorkloadFactory& workloadFactory,
8349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008350{
8351 const unsigned int shape0[] = { 1, 2, 2, 3 };
8352 const unsigned int shape1[] = { 1, 1, 1, 3 };
8353
8354 std::vector<uint8_t> input0({
8355 1, 2, 3, 4, 5, 6,
8356 7, 8, 9, 10, 11, 12
8357 });
8358
8359 std::vector<uint8_t> input1({1, 2, 3});
8360
8361 std::vector<uint8_t> output({
8362 1, 4, 9, 4, 10, 18,
8363 7, 16, 27, 10, 22, 36
8364 });
8365
Sadik Armagan2999a022019-04-09 14:20:12 +01008366 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8367 memoryManager,
8368 shape0,
8369 input0,
8370 1.0f,
8371 0,
8372 shape1,
8373 input1,
8374 1.0f,
8375 0,
8376 shape0,
8377 output,
8378 1.0f,
8379 0);
8380}
8381
8382LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8383 armnn::IWorkloadFactory& workloadFactory,
8384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8385{
8386 const unsigned int shape[] = { 1, 2, 2, 3 };
8387
8388 std::vector<int16_t> input0(
8389 {
8390 6, 7, 8, 9, 10, 11,
8391 12, 13, 14, 15, 16, 17
8392 });
8393
8394 std::vector<int16_t> input1(
8395 {
8396 1, 2, 3, 4, 5, 6,
8397 7, 8, 9, 10, 11, 12
8398 });
8399
8400 std::vector<int16_t> output(
8401 {
8402 6, 14, 24, 36, 50, 66,
8403 84, 104, 126, 150, 176, 204
8404 });
8405
8406 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8407 memoryManager,
8408 shape,
8409 input0,
8410 1.0f,
8411 0,
8412 shape,
8413 input1,
8414 1.0f,
8415 0,
8416 shape,
8417 output,
8418 1.0f,
8419 0);
8420}
8421
8422LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8423 armnn::IWorkloadFactory& workloadFactory,
8424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8425{
8426 const unsigned int shape0[] = { 1, 2, 2, 3 };
8427 const unsigned int shape1[] = { 1, 1, 1, 1 };
8428
8429 std::vector<int16_t> input0(
8430 {
8431 1, 2, 3, 4, 5, 6,
8432 7, 8, 9, 10, 11, 12
8433 });
8434
8435 std::vector<int16_t> input1({2});
8436
8437 std::vector<int16_t> output(
8438 {
8439 2, 4, 6, 8, 10, 12,
8440 14, 16, 18, 20, 22, 24
8441 });
8442
8443 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8444 memoryManager,
8445 shape0,
8446 input0,
8447 1.0f,
8448 0,
8449 shape1,
8450 input1,
8451 1.0f,
8452 0,
8453 shape0,
8454 output,
8455 1.0f,
8456 0);
8457}
8458
8459LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8460 armnn::IWorkloadFactory& workloadFactory,
8461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8462{
8463 const unsigned int shape0[] = { 1, 2, 2, 3 };
8464 const unsigned int shape1[] = { 1, 1, 1, 3 };
8465
8466 std::vector<int16_t> input0(
8467 {
8468 1, 2, 3, 4, 5, 6,
8469 7, 8, 9, 10, 11, 12
8470 });
8471
8472 std::vector<int16_t> input1({1, 2, 3});
8473
8474 std::vector<int16_t> output(
8475 {
8476 1, 4, 9, 4, 10, 18,
8477 7, 16, 27, 10, 22, 36
8478 });
8479
8480 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8481 memoryManager,
8482 shape0,
8483 input0,
8484 1.0f,
8485 0,
8486 shape1,
8487 input1,
8488 1.0f,
8489 0,
8490 shape0,
8491 output,
8492 1.0f,
8493 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008494}
telsoa014fcda012018-03-09 14:13:49 +00008495
David Beckf195f032018-09-06 16:46:34 +01008496namespace
8497{
Sadik Armagan2999a022019-04-09 14:20:12 +01008498template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008499LayerTestResult<T, 4> SubtractionTestHelper(
8500 armnn::IWorkloadFactory& workloadFactory,
8501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8502 const unsigned int shape0[4],
8503 const std::vector<T>& values0,
8504 float scale0,
8505 int32_t offset0,
8506 const unsigned int shape1[4],
8507 const std::vector<T> & values1,
8508 float scale1,
8509 int32_t offset1,
8510 const unsigned int outShape[4],
8511 const std::vector<T> & outValues,
8512 float outScale,
8513 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01008514{
Sadik Armagan2999a022019-04-09 14:20:12 +01008515 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8516 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8517 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01008518
8519 inputTensorInfo0.SetQuantizationScale(scale0);
8520 inputTensorInfo0.SetQuantizationOffset(offset0);
8521
8522 inputTensorInfo1.SetQuantizationScale(scale1);
8523 inputTensorInfo1.SetQuantizationOffset(offset1);
8524
8525 outputTensorInfo.SetQuantizationScale(outScale);
8526 outputTensorInfo.SetQuantizationOffset(outOffset);
8527
8528 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8529 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8530
8531 LayerTestResult<T, 4> result(outputTensorInfo);
8532 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8533
8534 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8535 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8536 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8537
8538 armnn::SubtractionQueueDescriptor data;
8539 armnn::WorkloadInfo info;
8540 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
8541 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8542 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8543
8544 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
8545
8546 inputHandle0->Allocate();
8547 inputHandle1->Allocate();
8548 outputHandle->Allocate();
8549
8550 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8551 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8552
Derek Lambertif30f7d32019-04-09 10:25:02 +01008553 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01008554 workload->Execute();
8555
8556 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8557
8558 return result;
8559}
8560} // anonymous namespace
8561
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008562LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8563 armnn::IWorkloadFactory& workloadFactory,
8564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008565{
8566 const unsigned int shape0[] = { 1, 1, 2, 2 };
8567 const unsigned int shape1[] = { 1, 1, 2, 2 };
8568
8569 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8570 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8571 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8572
Sadik Armagan2999a022019-04-09 14:20:12 +01008573 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8574 memoryManager,
8575 shape0, input0, 0.5f, 2,
8576 shape1, input1, 1.0f, 0,
8577 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008578}
8579
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008580LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8581 armnn::IWorkloadFactory& workloadFactory,
8582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008583{
8584 const unsigned int shape0[] = { 1, 1, 2, 2 };
8585 const unsigned int shape1[] = { 1, 1, 1, 1 };
8586
8587 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8588 std::vector<uint8_t> input1({ 2 });
8589 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8590
Sadik Armagan2999a022019-04-09 14:20:12 +01008591 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8592 memoryManager,
8593 shape0, input0, 0.5f, 2,
8594 shape1, input1, 1.0f, 0,
8595 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008596}
8597
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008598LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8599 armnn::IWorkloadFactory& workloadFactory,
8600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008601{
8602 const unsigned int shape0[] = { 1, 1, 2, 2 };
8603 const unsigned int shape1[] = { 1, 1, 2, 1 };
8604
8605 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8606 std::vector<uint8_t> input1({ 2, 1 });
8607 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8608
Sadik Armagan2999a022019-04-09 14:20:12 +01008609 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8610 memoryManager,
8611 shape0, input0, 1.0f, 0,
8612 shape1, input1, 1.0f, 0,
8613 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008614}
8615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008616LayerTestResult<float, 4> SubtractionTest(
8617 armnn::IWorkloadFactory& workloadFactory,
8618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008619{
8620 const unsigned int shape0[] = { 1, 1, 2, 2 };
8621 const unsigned int shape1[] = { 1, 1, 2, 2 };
8622
8623 std::vector<float> input0({ 1, 2, 3, 4 });
8624 std::vector<float> input1({ 1, -1, 0, 2 });
8625 std::vector<float> output({ 0, 3, 3, 2 });
8626
Sadik Armagan2999a022019-04-09 14:20:12 +01008627 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8628 memoryManager,
8629 shape0, input0, 1.0f, 0,
8630 shape1, input1, 1.0f, 0,
8631 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008632}
8633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008634LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8635 armnn::IWorkloadFactory& workloadFactory,
8636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008637{
8638 const unsigned int shape0[] = { 1, 1, 2, 2 };
8639 const unsigned int shape1[] = { 1, 1, 1, 1 };
8640
8641 std::vector<float> input0({ 1, 2, 3, 4 });
8642 std::vector<float> input1({ 10 });
8643 std::vector<float> output({ -9, -8, -7, -6 });
8644
Sadik Armagan2999a022019-04-09 14:20:12 +01008645 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8646 memoryManager,
8647 shape0, input0, 1.0f, 0,
8648 shape1, input1, 1.0f, 0,
8649 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008650}
8651
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008652LayerTestResult<float, 4> SubtractionBroadcastTest(
8653 armnn::IWorkloadFactory& workloadFactory,
8654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008655{
8656 const unsigned int shape0[] = { 1, 1, 2, 2 };
8657 const unsigned int shape1[] = { 1, 1, 1, 2 };
8658
8659 std::vector<float> input0({ 1, 2, 3, 4 });
8660 std::vector<float> input1({ 10, -5 });
8661 std::vector<float> output({ -9, 7, -7, 9 });
8662
Sadik Armagan2999a022019-04-09 14:20:12 +01008663 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8664 memoryManager,
8665 shape0, input0, 1.0f, 0,
8666 shape1, input1, 1.0f, 0,
8667 shape0, output, 1.0f, 0);
8668}
8669
8670LayerTestResult<int16_t, 4> SubtractionInt16Test(
8671 armnn::IWorkloadFactory& workloadFactory,
8672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8673{
8674 const unsigned int shape0[] = { 1, 1, 2, 2 };
8675 const unsigned int shape1[] = { 1, 1, 2, 2 };
8676
8677 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8678 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8679 std::vector<int16_t> output({ 3, 3, 5, 5 });
8680
8681 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8682 memoryManager,
8683 shape0, input0, 0.5f, 0,
8684 shape1, input1, 1.0f, 0,
8685 shape0, output, 1.0f, 0);
8686}
8687
8688LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8689 armnn::IWorkloadFactory& workloadFactory,
8690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8691{
8692 const unsigned int shape0[] = { 1, 1, 2, 2 };
8693 const unsigned int shape1[] = { 1, 1, 1, 1 };
8694
8695 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8696 std::vector<int16_t> input1({ 2 });
8697 std::vector<int16_t> output({ 3, 4, 5, 6 });
8698
8699 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8700 memoryManager,
8701 shape0, input0, 0.5f, 0,
8702 shape1, input1, 1.0f, 0,
8703 shape0, output, 1.0f, 0);
8704}
8705
8706LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8707 armnn::IWorkloadFactory& workloadFactory,
8708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8709{
8710 const unsigned int shape0[] = { 1, 1, 2, 2 };
8711 const unsigned int shape1[] = { 1, 1, 2, 1 };
8712
8713 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8714 std::vector<int16_t> input1({ 2, 1 });
8715 std::vector<int16_t> output({ 8, 11, 12, 15 });
8716
8717 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8718 memoryManager,
8719 shape0, input0, 1.0f, 0,
8720 shape1, input1, 1.0f, 0,
8721 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008722}
8723
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008724LayerTestResult<float, 4> BatchNormTest(
8725 armnn::IWorkloadFactory& workloadFactory,
8726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008727{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008728 // BatchSize: 1
8729 // Channels: 2
8730 // Height: 3
8731 // Width: 2
8732
8733 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8734 std::vector<float> inputValues
8735 {
8736 // Batch 0, Channel 0, Height (3) x Width (2)
8737 1.f, 4.f,
8738 4.f, 2.f,
8739 1.f, 6.f,
8740
8741 // Batch 0, Channel 1, Height (3) x Width (2)
8742 1.f, 1.f,
8743 4.f, 1.f,
8744 -2.f, 4.f
8745 };
8746 std::vector<float> expectedOutputValues
8747 {
8748 // Batch 0, Channel 0, Height (3) x Width (2)
8749 1.f, 4.f,
8750 4.f, 2.f,
8751 1.f, 6.f,
8752
8753 // Batch 0, Channel 1, Height (3) x Width (2)
8754 3.f, 3.f,
8755 4.f, 3.f,
8756 2.f, 4.f
8757 };
8758
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008759 return BatchNormTestImpl<armnn::DataType::Float32>(
8760 workloadFactory, memoryManager,
8761 inputOutputShape, inputValues, expectedOutputValues,
8762 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008763}
8764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008765LayerTestResult<float, 4> BatchNormNhwcTest(
8766 armnn::IWorkloadFactory& workloadFactory,
8767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008768{
8769 // BatchSize: 1
8770 // Height: 3
8771 // Width: 2
8772 // Channels: 2
8773
8774 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8775 std::vector<float> inputValues
8776 {
8777 // Batch 0, Height 0, Width (2) x Channel (2)
8778 1.f, 1.f,
8779 4.f, 1.f,
8780
8781 // Batch 0, Height 1, Width (2) x Channel (2)
8782 4.f, 4.f,
8783 2.f, 1.f,
8784
8785 // Batch 0, Height 2, Width (2) x Channel (2)
8786 1.f, -2.f,
8787 6.f, 4.f
8788 };
8789 std::vector<float> expectedOutputValues
8790 {
8791 // Batch 0, Height 0, Width (2) x Channel (2)
8792 1.f, 3.f,
8793 4.f, 3.f,
8794
8795 // Batch 0, Height 1, Width (2) x Channel (2)
8796 4.f, 4.f,
8797 2.f, 3.f,
8798
8799 // Batch 0, Height 2, Width (2) x Channel (2)
8800 1.f, 2.f,
8801 6.f, 4.f
8802 };
8803
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008804 return BatchNormTestImpl<armnn::DataType::Float32>(
8805 workloadFactory, memoryManager,
8806 inputOutputShape, inputValues, expectedOutputValues,
8807 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008808}
8809
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008810LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8811 armnn::IWorkloadFactory& workloadFactory,
8812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008813{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008814 // BatchSize: 1
8815 // Channels: 2
8816 // Height: 3
8817 // Width: 2
8818
8819 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8820 std::vector<float> inputValues
8821 {
8822 // Batch 0, Channel 0, Height (3) x Width (2)
8823 1.f, 4.f,
8824 4.f, 2.f,
8825 1.f, 6.f,
8826
8827 // Batch 0, Channel 1, Height (3) x Width (2)
8828 1.f, 1.f,
8829 4.f, 1.f,
8830 -2.f, 4.f
8831 };
8832 std::vector<float> expectedOutputValues
8833 {
8834 // Batch 0, Channel 0, Height (3) x Width (2)
8835 1.f, 4.f,
8836 4.f, 2.f,
8837 1.f, 6.f,
8838
8839 // Batch 0, Channel 1, Height (3) x Width (2)
8840 3.f, 3.f,
8841 4.f, 3.f,
8842 2.f, 4.f
8843 };
8844
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008845 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8846 workloadFactory, memoryManager,
8847 inputOutputShape, inputValues, expectedOutputValues,
8848 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008849}
8850
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008851LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8852 armnn::IWorkloadFactory& workloadFactory,
8853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008854{
8855 // BatchSize: 1
8856 // Height: 3
8857 // Width: 2
8858 // Channels: 2
8859
8860 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8861 std::vector<float> inputValues
8862 {
8863 // Batch 0, Height 0, Width (2) x Channel (2)
8864 1.f, 1.f,
8865 4.f, 1.f,
8866
8867 // Batch 0, Height 1, Width (2) x Channel (2)
8868 4.f, 4.f,
8869 2.f, 1.f,
8870
8871 // Batch 0, Height 2, Width (2) x Channel (2)
8872 1.f, -2.f,
8873 6.f, 4.f
8874 };
8875 std::vector<float> expectedOutputValues
8876 {
8877 // Batch 0, Height 0, Width (2) x Channel (2)
8878 1.f, 3.f,
8879 4.f, 3.f,
8880
8881 // Batch 0, Height 1, Width (2) x Channel (2)
8882 4.f, 4.f,
8883 2.f, 3.f,
8884
8885 // Batch 0, Height 2, Width (2) x Channel (2)
8886 1.f, 2.f,
8887 6.f, 4.f
8888 };
8889
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008890 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8891 (workloadFactory, memoryManager,
8892 inputOutputShape, inputValues, expectedOutputValues,
8893 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008894}
8895
Matteo Martincighf5507132019-06-04 10:59:47 +01008896LayerTestResult<int16_t, 4> BatchNormInt16Test(
8897 armnn::IWorkloadFactory& workloadFactory,
8898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8899{
8900 // BatchSize: 1
8901 // Channels: 2
8902 // Height: 3
8903 // Width: 2
8904
8905 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8906 std::vector<float> inputValues
8907 {
8908 // Batch 0, Channel 0, Height (3) x Width (2)
8909 1.f, 4.f,
8910 4.f, 2.f,
8911 1.f, 6.f,
8912
8913 // Batch 0, Channel 1, Height (3) x Width (2)
8914 1.f, 1.f,
8915 4.f, 1.f,
8916 -2.f, 4.f
8917 };
8918 std::vector<float> expectedOutputValues
8919 {
8920 // Batch 0, Channel 0, Height (3) x Width (2)
8921 1.f, 4.f,
8922 4.f, 2.f,
8923 1.f, 6.f,
8924
8925 // Batch 0, Channel 1, Height (3) x Width (2)
8926 3.f, 3.f,
8927 4.f, 3.f,
8928 2.f, 4.f
8929 };
8930
8931 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8932 workloadFactory, memoryManager,
8933 inputOutputShape, inputValues, expectedOutputValues,
8934 1.f/20.f, 50, armnn::DataLayout::NCHW);
8935}
8936
8937LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8938 armnn::IWorkloadFactory& workloadFactory,
8939 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8940{
8941 // BatchSize: 1
8942 // Height: 3
8943 // Width: 2
8944 // Channels: 2
8945
8946 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8947 std::vector<float> inputValues
8948 {
8949 // Batch 0, Height 0, Width (2) x Channel (2)
8950 1.f, 1.f,
8951 4.f, 1.f,
8952
8953 // Batch 0, Height 1, Width (2) x Channel (2)
8954 4.f, 4.f,
8955 2.f, 1.f,
8956
8957 // Batch 0, Height 2, Width (2) x Channel (2)
8958 1.f, -2.f,
8959 6.f, 4.f
8960 };
8961 std::vector<float> expectedOutputValues
8962 {
8963 // Batch 0, Height 0, Width (2) x Channel (2)
8964 1.f, 3.f,
8965 4.f, 3.f,
8966
8967 // Batch 0, Height 1, Width (2) x Channel (2)
8968 4.f, 4.f,
8969 2.f, 3.f,
8970
8971 // Batch 0, Height 2, Width (2) x Channel (2)
8972 1.f, 2.f,
8973 6.f, 4.f
8974 };
8975
8976 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8977 (workloadFactory, memoryManager,
8978 inputOutputShape, inputValues, expectedOutputValues,
8979 1.f/20.f, 50, armnn::DataLayout::NHWC);
8980}
8981
Nina Drozd58ef2c62019-05-16 12:09:18 +01008982LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008983 armnn::IWorkloadFactory& workloadFactory,
8984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008985{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008986 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008987}
8988
Nina Drozd58ef2c62019-05-16 12:09:18 +01008989LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8990 armnn::IWorkloadFactory& workloadFactory,
8991 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8992{
8993 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8994}
8995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008996LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8997 armnn::IWorkloadFactory& workloadFactory,
8998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008999{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009000 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009001}
9002
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009003LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
9004 armnn::IWorkloadFactory& workloadFactory,
9005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009006{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009007 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009008}
9009
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009010LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
9011 armnn::IWorkloadFactory& workloadFactory,
9012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009013{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009014 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009015}
9016
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009017LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
9018 armnn::IWorkloadFactory& workloadFactory,
9019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009020{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009021 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9022 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009023}
9024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009025LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
9026 armnn::IWorkloadFactory& workloadFactory,
9027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009028{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009029 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9030 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009031}
9032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009033LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
9034 armnn::IWorkloadFactory& workloadFactory,
9035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009036{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009037 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009038}
9039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009040LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
9041 armnn::IWorkloadFactory& workloadFactory,
9042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009043{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009044 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009045}
9046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009047LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
9048 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9050 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00009051{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009052 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9053 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009054}
9055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009056LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
9057 armnn::IWorkloadFactory& workloadFactory,
9058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009060 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009061}
9062
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009063LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
9064 armnn::IWorkloadFactory& workloadFactory,
9065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009066{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009067 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9068 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009069}
9070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009071LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
9072 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9074 bool useSubtensor)
9075{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009076 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9077 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009078}
9079
9080LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9081 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009083{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009084 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009085}
9086
9087LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9088 armnn::IWorkloadFactory& workloadFactory,
9089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9090{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009091 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009092}
9093
9094LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9095 armnn::IWorkloadFactory& workloadFactory,
9096 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9097{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009098 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009099}
9100
9101LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9102 armnn::IWorkloadFactory& workloadFactory,
9103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
9104{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009105 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9106 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00009107}
9108
9109LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9110 armnn::IWorkloadFactory& workloadFactory,
9111 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9112{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009113 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9114 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009115}
9116
9117LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9118 armnn::IWorkloadFactory& workloadFactory,
9119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9120{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009121 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9122 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009123}
9124
9125LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9126 armnn::IWorkloadFactory& workloadFactory,
9127 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9128{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009129 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9130 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009131}
9132
9133LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9134 armnn::IWorkloadFactory& workloadFactory,
9135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9136 bool useSubtensor)
9137{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009138 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9139 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00009140}
9141
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009142LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9143 armnn::IWorkloadFactory& workloadFactory,
9144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9145 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009146{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009147 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9148 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009149}
9150
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009151LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9152 armnn::IWorkloadFactory& workloadFactory,
9153 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9154 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009155{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009156 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009157 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00009158}
9159
Teresa Charlin0434df62019-06-06 13:40:35 +01009160LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9161 armnn::IWorkloadFactory& workloadFactory,
9162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9163 bool forceNoPadding)
9164{
9165 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9166 workloadFactory, memoryManager, forceNoPadding);
9167}
9168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009169LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9170 armnn::IWorkloadFactory& workloadFactory,
9171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9172 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009173{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009174 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9175 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009176}
9177
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009178LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9179 armnn::IWorkloadFactory& workloadFactory,
9180 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9181 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009182{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009183 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009184 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009185}
9186
Teresa Charlin0434df62019-06-06 13:40:35 +01009187LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9188 armnn::IWorkloadFactory& workloadFactory,
9189 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9190 bool forceNoPadding)
9191{
9192 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9193 workloadFactory, memoryManager, forceNoPadding);
9194}
9195
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009196LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9197 armnn::IWorkloadFactory& workloadFactory,
9198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009199 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009200{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009201 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009202}
9203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009204LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9205 armnn::IWorkloadFactory& workloadFactory,
9206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009207 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01009208{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009209 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01009210}
9211
Teresa Charlin0434df62019-06-06 13:40:35 +01009212LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9213 armnn::IWorkloadFactory& workloadFactory,
9214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9215 const armnn::DataLayout dataLayout)
9216{
9217 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9218}
9219LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9220 armnn::IWorkloadFactory& workloadFactory,
9221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9222{
9223 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9224}
9225
9226LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9227 armnn::IWorkloadFactory& workloadFactory,
9228 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9229{
9230 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9231 workloadFactory, memoryManager, 1.0f, -5);
9232}
9233
9234LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9235 armnn::IWorkloadFactory& workloadFactory,
9236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9237{
9238 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9239 workloadFactory, memoryManager);
9240}
9241
9242LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9243 armnn::IWorkloadFactory& workloadFactory,
9244 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9245{
9246 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9247}
9248
9249LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9250 armnn::IWorkloadFactory& workloadFactory,
9251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9252{
9253 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9254 workloadFactory, memoryManager, 1.0f, -5);
9255}
9256
9257LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9258 armnn::IWorkloadFactory& workloadFactory,
9259 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9260{
9261 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9262 workloadFactory, memoryManager);
9263}
9264
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009265LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9266 armnn::IWorkloadFactory& workloadFactory,
9267 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009268 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009269{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009270 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01009271}
9272
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009273LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9274 armnn::IWorkloadFactory& workloadFactory,
9275 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009276 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01009277{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009278 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009279 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009280}
9281
Teresa Charlin0434df62019-06-06 13:40:35 +01009282LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9283 armnn::IWorkloadFactory& workloadFactory,
9284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9285 const armnn::DataLayout dataLayout)
9286{
9287 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9288 workloadFactory, memoryManager, dataLayout);
9289}
9290
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009291LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9292 armnn::IWorkloadFactory& workloadFactory,
9293 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9294 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01009295{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009296 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009297 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01009298}
9299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009300LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9301 armnn::IWorkloadFactory& workloadFactory,
9302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009303{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009304 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009305}
9306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009307LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9308 armnn::IWorkloadFactory& workloadFactory,
9309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009310{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009311 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9312 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009313}
9314
Teresa Charlin0434df62019-06-06 13:40:35 +01009315LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9316 armnn::IWorkloadFactory& workloadFactory,
9317 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9318{
9319 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9320 workloadFactory, memoryManager);
9321}
9322LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9323 armnn::IWorkloadFactory& workloadFactory,
9324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9325{
9326 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9327}
9328
9329LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9330 armnn::IWorkloadFactory& workloadFactory,
9331 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9332{
9333 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9334 workloadFactory, memoryManager);
9335}
9336
9337LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9338 armnn::IWorkloadFactory& workloadFactory,
9339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9340{
9341 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9342 workloadFactory, memoryManager);
9343}
9344
9345LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9346 armnn::IWorkloadFactory& workloadFactory,
9347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9348{
9349 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9350 workloadFactory, memoryManager);
9351}
9352
9353LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9354 armnn::IWorkloadFactory& workloadFactory,
9355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9356{
9357 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9358 workloadFactory, memoryManager);
9359}
9360
9361LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9362 armnn::IWorkloadFactory& workloadFactory,
9363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9364{
9365 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9366 workloadFactory, memoryManager);
9367}
9368
9369LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9370 armnn::IWorkloadFactory& workloadFactory,
9371 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9372{
9373 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9374}
9375
9376LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9377 armnn::IWorkloadFactory& workloadFactory,
9378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9379{
9380 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9381 workloadFactory, memoryManager);
9382}
9383
9384LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9385 armnn::IWorkloadFactory& workloadFactory,
9386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9387{
9388 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9389 workloadFactory, memoryManager);
9390}
9391
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009392LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9393 armnn::IWorkloadFactory& workloadFactory,
9394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009395 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009396{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009397 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009398}
9399
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009400LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9401 armnn::IWorkloadFactory& workloadFactory,
9402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009403 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009404{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009405 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009406}
9407
Teresa Charlin0434df62019-06-06 13:40:35 +01009408LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9409 armnn::IWorkloadFactory& workloadFactory,
9410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9411 const armnn::DataLayout dataLayout)
9412{
9413 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9414}
9415
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009416LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9417 armnn::IWorkloadFactory& workloadFactory,
9418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009419{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009420 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009421}
9422
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009423LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9424 armnn::IWorkloadFactory& workloadFactory,
9425 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009426{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009427 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009428}
9429
Teresa Charlin0434df62019-06-06 13:40:35 +01009430LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9431 armnn::IWorkloadFactory& workloadFactory,
9432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9433{
9434 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9435}
9436
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009437LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9438 armnn::IWorkloadFactory& workloadFactory,
9439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009441 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009442}
9443
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009444LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9445 armnn::IWorkloadFactory& workloadFactory,
9446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009447{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009448 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009449}
9450
Teresa Charlin0434df62019-06-06 13:40:35 +01009451LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9452 armnn::IWorkloadFactory& workloadFactory,
9453 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9454{
9455 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9456}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009457LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9458 armnn::IWorkloadFactory& workloadFactory,
9459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009460{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009461 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009462}
9463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009464LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9465 armnn::IWorkloadFactory& workloadFactory,
9466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009467{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009468 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009469}
9470
Teresa Charlin0434df62019-06-06 13:40:35 +01009471LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9472 armnn::IWorkloadFactory& workloadFactory,
9473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9474{
9475 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9476}
9477
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009478LayerTestResult<float, 4> L2Pooling2dSize7Test(
9479 armnn::IWorkloadFactory& workloadFactory,
9480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009481{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009482 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009483}
9484
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009485LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9486 armnn::IWorkloadFactory& workloadFactory,
9487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009488{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009489 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009490}
9491
Teresa Charlin0434df62019-06-06 13:40:35 +01009492LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9493 armnn::IWorkloadFactory& workloadFactory,
9494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9495{
9496 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9497}
9498
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009499LayerTestResult<float, 4> L2Pooling2dSize9Test(
9500 armnn::IWorkloadFactory& workloadFactory,
9501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009502{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009503 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009504}
9505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009506LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9507 armnn::IWorkloadFactory& workloadFactory,
9508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009509{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009510 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009511}
9512
Teresa Charlin0434df62019-06-06 13:40:35 +01009513LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9514 armnn::IWorkloadFactory& workloadFactory,
9515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9516{
9517 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9518}
9519LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9520 armnn::IWorkloadFactory& workloadFactory,
9521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9522{
9523 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9524}
9525
9526LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9527 armnn::IWorkloadFactory& workloadFactory,
9528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9529{
9530 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9531}
9532
9533LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9534 armnn::IWorkloadFactory& workloadFactory,
9535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9536{
9537 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9538}
9539
9540LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9541 armnn::IWorkloadFactory& workloadFactory,
9542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9543{
9544 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9545}
9546
9547LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9548 armnn::IWorkloadFactory& workloadFactory,
9549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9550{
9551 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9552}
9553
9554LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9555 armnn::IWorkloadFactory& workloadFactory,
9556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9557{
9558 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9559}
9560
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009561LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9562 armnn::IWorkloadFactory& workloadFactory,
9563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009564{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009565 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009566}
9567
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009568LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9569 armnn::IWorkloadFactory& workloadFactory,
9570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009571{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009572 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009573}
9574
Teresa Charlin0434df62019-06-06 13:40:35 +01009575LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9576 armnn::IWorkloadFactory& workloadFactory,
9577 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9578{
9579 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9580}
9581
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009582LayerTestResult<float, 4> ComparePooling2dTest(
9583 armnn::IWorkloadFactory& workloadFactory,
9584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9585 armnn::IWorkloadFactory& refWorkloadFactory,
9586 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009587{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009588 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009589 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009590}
9591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009592LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9593 armnn::IWorkloadFactory& workloadFactory,
9594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9595 armnn::IWorkloadFactory& refWorkloadFactory,
9596 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009597{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009598 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009599 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009600}
9601
Teresa Charlin0434df62019-06-06 13:40:35 +01009602LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9603 armnn::IWorkloadFactory& workloadFactory,
9604 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9605 armnn::IWorkloadFactory& refWorkloadFactory,
9606 armnn::PoolingAlgorithm poolingType)
9607{
9608 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9609 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9610}
9611
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009612LayerTestResult<float, 2> FullyConnectedLargeTest(
9613 armnn::IWorkloadFactory& workloadFactory,
9614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9615 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009616{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009617 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009618}
9619
// Chains two workloads: a 1x1 / stride-2 max pooling followed by an addition
// that reads the pooling result through the same tensor handle. Verifies that
// one workload's output handle can feed a subsequent workload.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Host-side scratch buffer shaped like the pooling output; used below for
    // a round-trip copy through the pooling output handle.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): the pooling workload has not executed yet at this point,
    // so this reads back uninitialised output data and the next line writes
    // it back unchanged. The round-trip looks redundant — confirm whether it
    // is needed to map/touch the handle on some backends before removing it.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run the pooling first; the addition then consumes poolingOutputHandle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009724
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009725LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9726 armnn::IWorkloadFactory& workloadFactory,
9727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009728{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009729 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009730}
9731
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009732LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9733 armnn::IWorkloadFactory& workloadFactory,
9734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009735{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009736 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009737}
9738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009739LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9740 armnn::IWorkloadFactory& workloadFactory,
9741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009742{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009743 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009744}
9745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009746LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9747 armnn::IWorkloadFactory& workloadFactory,
9748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009749{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009750 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009751}
9752
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009753LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9754 armnn::IWorkloadFactory& workloadFactory,
9755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009756{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009757 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009758}
9759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009760LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9761 armnn::IWorkloadFactory& workloadFactory,
9762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009763{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009764 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009765}
9766
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009767LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9768 armnn::IWorkloadFactory& workloadFactory,
9769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009770{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009771 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009772}
9773
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009774LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9775 armnn::IWorkloadFactory& workloadFactory,
9776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009777{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009778 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009779}
9780
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009781LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9782 armnn::IWorkloadFactory& workloadFactory,
9783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009784{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009785 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009786}
9787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009788LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9789 armnn::IWorkloadFactory& workloadFactory,
9790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009791{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009792 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009793}
9794
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009795LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9796 armnn::IWorkloadFactory& workloadFactory,
9797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009798{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009799 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009800}
9801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009802LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9803 armnn::IWorkloadFactory& workloadFactory,
9804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009805{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009806 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009807}
9808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009809LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9810 armnn::IWorkloadFactory& workloadFactory,
9811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009812{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009813 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009814}
9815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009816LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9817 armnn::IWorkloadFactory& workloadFactory,
9818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009819{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009820 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009821}
9822
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009823LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9824 armnn::IWorkloadFactory& workloadFactory,
9825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009826{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009827 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009828}
9829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009830LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9831 armnn::IWorkloadFactory& workloadFactory,
9832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009833{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009834 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009835}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009836
nikraj01120522a2019-05-31 11:33:07 +01009837LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9838 armnn::IWorkloadFactory& workloadFactory,
9839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9840{
9841 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9842}
9843
9844LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9845 armnn::IWorkloadFactory& workloadFactory,
9846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9847{
9848 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9849}
9850
9851LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9852 armnn::IWorkloadFactory& workloadFactory,
9853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9854{
9855 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9856}
9857
9858LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9859 armnn::IWorkloadFactory& workloadFactory,
9860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9861{
9862 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9863}
9864
9865LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9866 armnn::IWorkloadFactory& workloadFactory,
9867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9868{
9869 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9870}
9871
9872LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9873 armnn::IWorkloadFactory& workloadFactory,
9874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9875{
9876 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9877}
9878
9879LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9880 armnn::IWorkloadFactory& workloadFactory,
9881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9882{
9883 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9884}
9885
9886LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9887 armnn::IWorkloadFactory& workloadFactory,
9888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9889{
9890 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9891}
9892
Keith Davisa57eccb2019-06-14 17:33:22 +01009893LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9894 armnn::IWorkloadFactory& workloadFactory,
9895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9896{
James Conroyd2aa85e2019-07-01 17:12:40 +01009897 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009898 workloadFactory,
9899 memoryManager);
9900}
9901
9902LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9903 armnn::IWorkloadFactory& workloadFactory,
9904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9905{
James Conroyd2aa85e2019-07-01 17:12:40 +01009906 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009907 workloadFactory,
9908 memoryManager,
9909 armnn::DataLayout::NCHW);
9910}
9911
James Conroyd2aa85e2019-07-01 17:12:40 +01009912LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009913 armnn::IWorkloadFactory& workloadFactory,
9914 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9915{
James Conroyd2aa85e2019-07-01 17:12:40 +01009916 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009917 workloadFactory,
9918 memoryManager);
9919}
9920
James Conroyd2aa85e2019-07-01 17:12:40 +01009921LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009922 armnn::IWorkloadFactory& workloadFactory,
9923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9924{
James Conroyd2aa85e2019-07-01 17:12:40 +01009925 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
9926 workloadFactory,
9927 memoryManager,
9928 armnn::DataLayout::NCHW);
9929}
9930
9931LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
9932 armnn::IWorkloadFactory& workloadFactory,
9933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9934{
9935 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9936 workloadFactory,
9937 memoryManager);
9938}
9939
9940LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
9941 armnn::IWorkloadFactory& workloadFactory,
9942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9943{
9944 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9945 workloadFactory,
9946 memoryManager,
9947 armnn::DataLayout::NCHW);
9948}
9949
9950LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
9951 armnn::IWorkloadFactory& workloadFactory,
9952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9953{
9954 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
9955 workloadFactory,
9956 memoryManager);
9957}
9958
9959LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
9960 armnn::IWorkloadFactory& workloadFactory,
9961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9962{
9963 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009964 workloadFactory,
9965 memoryManager,
9966 armnn::DataLayout::NCHW);
9967}
9968
namespace {
// Currently empty — no file-local helpers at present.
} // anonymous namespace
9972
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009973LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9974 armnn::IWorkloadFactory& workloadFactory,
9975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9976{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009977 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009978}
9979
9980LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9981 armnn::IWorkloadFactory& workloadFactory,
9982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009984 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009985}
9986
9987LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9988 armnn::IWorkloadFactory& workloadFactory,
9989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9990{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009991 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009992}
9993
9994LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9995 armnn::IWorkloadFactory& workloadFactory,
9996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9997{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009998 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009999}
10000
10001LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
10002 armnn::IWorkloadFactory& workloadFactory,
10003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10004{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010005 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010006}
10007
10008LayerTestResult<float, 3> StridedSlice3DFloat32Test(
10009 armnn::IWorkloadFactory& workloadFactory,
10010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10011{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010012 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010013}
10014
10015LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
10016 armnn::IWorkloadFactory& workloadFactory,
10017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10018{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010019 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010020}
10021
10022LayerTestResult<float, 2> StridedSlice2DFloat32Test(
10023 armnn::IWorkloadFactory& workloadFactory,
10024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10025{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010026 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010027}
10028
10029LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
10030 armnn::IWorkloadFactory& workloadFactory,
10031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10032{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010033 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010034}
10035
10036LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
10037 armnn::IWorkloadFactory& workloadFactory,
10038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10039{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010040 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010041}
10042
10043LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
10044 armnn::IWorkloadFactory& workloadFactory,
10045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10046{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010047 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010048}
10049
10050LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
10051 armnn::IWorkloadFactory& workloadFactory,
10052 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10053{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010054 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010055}
10056
10057LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
10058 armnn::IWorkloadFactory& workloadFactory,
10059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10060{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010061 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010062}
10063
10064LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
10065 armnn::IWorkloadFactory& workloadFactory,
10066 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10067{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010068 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010069}
10070
10071LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
10072 armnn::IWorkloadFactory& workloadFactory,
10073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10074{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010075 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010076}
10077
10078LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
10079 armnn::IWorkloadFactory& workloadFactory,
10080 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10081{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010082 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010083}
10084
10085LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
10086 armnn::IWorkloadFactory& workloadFactory,
10087 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10088{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010089 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010090}
10091
10092LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
10093 armnn::IWorkloadFactory& workloadFactory,
10094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10095{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010096 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010097}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010098
Matteo Martincigh42666a12019-05-29 08:53:41 +010010099LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
10100 armnn::IWorkloadFactory& workloadFactory,
10101 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10102{
10103 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10104}
10105
10106LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
10107 armnn::IWorkloadFactory& workloadFactory,
10108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10109{
10110 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10111}
10112
10113LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
10114 armnn::IWorkloadFactory& workloadFactory,
10115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10116{
10117 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10118}
10119
10120LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
10121 armnn::IWorkloadFactory& workloadFactory,
10122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10123{
10124 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10125}
10126
10127LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
10128 armnn::IWorkloadFactory& workloadFactory,
10129 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10130{
10131 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10132}
10133
10134LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
10135 armnn::IWorkloadFactory& workloadFactory,
10136 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10137{
10138 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10139}
10140
10141LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
10142 armnn::IWorkloadFactory& workloadFactory,
10143 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10144{
10145 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10146}
10147
10148LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
10149 armnn::IWorkloadFactory& workloadFactory,
10150 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10151{
10152 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10153}
10154
10155LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
10156 armnn::IWorkloadFactory& workloadFactory,
10157 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10158{
10159 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10160}
10161
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010162LayerTestResult<float, 4> Debug4DFloat32Test(
10163 armnn::IWorkloadFactory& workloadFactory,
10164 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10165{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010166 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010167}
10168
10169LayerTestResult<float, 3> Debug3DFloat32Test(
10170 armnn::IWorkloadFactory& workloadFactory,
10171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10172{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010173 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010174}
10175
10176LayerTestResult<float, 2> Debug2DFloat32Test(
10177 armnn::IWorkloadFactory& workloadFactory,
10178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10179{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010180 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010181}
10182
10183LayerTestResult<float, 1> Debug1DFloat32Test(
10184 armnn::IWorkloadFactory& workloadFactory,
10185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10186{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010187 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010188}
10189
10190LayerTestResult<uint8_t, 4> Debug4DUint8Test(
10191 armnn::IWorkloadFactory& workloadFactory,
10192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10193{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010194 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010195}
10196
10197LayerTestResult<uint8_t, 3> Debug3DUint8Test(
10198 armnn::IWorkloadFactory& workloadFactory,
10199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10200{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010201 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010202}
10203
10204LayerTestResult<uint8_t, 2> Debug2DUint8Test(
10205 armnn::IWorkloadFactory& workloadFactory,
10206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10207{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010208 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010209}
10210
10211LayerTestResult<uint8_t, 1> Debug1DUint8Test(
10212 armnn::IWorkloadFactory& workloadFactory,
10213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10214{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010215 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010216}
Matteo Martincigh49124022019-01-11 13:25:59 +000010217
narpra014951d842019-01-18 16:53:53 +000010218LayerTestResult<float, 1> Gather1DParamsFloatTest(
10219 armnn::IWorkloadFactory& workloadFactory,
10220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10221{
10222 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10223}
10224
10225LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
10226 armnn::IWorkloadFactory& workloadFactory,
10227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10228{
10229 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10230}
10231
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010232LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
10233 armnn::IWorkloadFactory& workloadFactory,
10234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10235{
10236 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10237}
10238
narpra014951d842019-01-18 16:53:53 +000010239LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
10240 armnn::IWorkloadFactory& workloadFactory,
10241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10242{
10243 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10244}
10245
10246LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
10247 armnn::IWorkloadFactory& workloadFactory,
10248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10249{
10250 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10251}
10252
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010253LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
10254 armnn::IWorkloadFactory& workloadFactory,
10255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10256{
10257 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10258}
10259
narpra014951d842019-01-18 16:53:53 +000010260LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
10261 armnn::IWorkloadFactory& workloadFactory,
10262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10263{
10264 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10265}
10266
10267LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
10268 armnn::IWorkloadFactory& workloadFactory,
10269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10270{
10271 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
10272 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +000010273}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010274
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010275LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
10276 armnn::IWorkloadFactory& workloadFactory,
10277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10278{
10279 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
10280 workloadFactory, memoryManager);
10281}
10282
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010283LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010284 armnn::IWorkloadFactory& workloadFactory,
10285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10286{
10287 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10288}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010289
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010290LayerTestResult<float, 4> DequantizeOffsetUint8Test(
10291 armnn::IWorkloadFactory& workloadFactory,
10292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10293{
10294 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10295}
10296
10297LayerTestResult<float, 4> DequantizeSimpleInt16Test(
10298 armnn::IWorkloadFactory& workloadFactory,
10299 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10300{
10301 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10302}
10303
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010304LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10305 armnn::IWorkloadFactory& workloadFactory,
10306 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10307{
10308 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10309}
10310
10311LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10312 armnn::IWorkloadFactory& workloadFactory,
10313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10314{
10315 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10316}
10317
10318LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10319 armnn::IWorkloadFactory& workloadFactory,
10320 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10321{
10322 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10323}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010010324
10325//
10326// TransposeConvolution2d
10327//
10328
10329// Simple biased
10330LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
10331 armnn::IWorkloadFactory& workloadFactory,
10332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10333{
10334 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10335 workloadFactory,
10336 memoryManager,
10337 true,
10338 armnn::DataLayout::NCHW);
10339}
10340
10341LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
10342 armnn::IWorkloadFactory& workloadFactory,
10343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10344{
10345 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10346 workloadFactory,
10347 memoryManager,
10348 true,
10349 armnn::DataLayout::NHWC);
10350}
10351
10352LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
10353 armnn::IWorkloadFactory& workloadFactory,
10354 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10355{
10356 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10357 workloadFactory,
10358 memoryManager,
10359 true,
10360 armnn::DataLayout::NCHW);
10361}
10362
10363LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
10364 armnn::IWorkloadFactory& workloadFactory,
10365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10366{
10367 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10368 workloadFactory,
10369 memoryManager,
10370 true,
10371 armnn::DataLayout::NHWC);
10372}
10373
10374LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
10375 armnn::IWorkloadFactory& workloadFactory,
10376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10377{
10378 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10379 workloadFactory,
10380 memoryManager,
10381 true,
10382 armnn::DataLayout::NCHW);
10383}
10384
10385LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
10386 armnn::IWorkloadFactory& workloadFactory,
10387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10388{
10389 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10390 workloadFactory,
10391 memoryManager,
10392 true,
10393 armnn::DataLayout::NHWC);
10394}
10395
10396// Simple unbiased
10397LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
10398 armnn::IWorkloadFactory& workloadFactory,
10399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10400{
10401 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10402 workloadFactory,
10403 memoryManager,
10404 false,
10405 armnn::DataLayout::NCHW);
10406}
10407
10408LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
10409 armnn::IWorkloadFactory& workloadFactory,
10410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10411{
10412 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10413 workloadFactory,
10414 memoryManager,
10415 false,
10416 armnn::DataLayout::NHWC);
10417}
10418
10419LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
10420 armnn::IWorkloadFactory& workloadFactory,
10421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10422{
10423 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10424 workloadFactory,
10425 memoryManager,
10426 false,
10427 armnn::DataLayout::NCHW);
10428}
10429
10430LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
10431 armnn::IWorkloadFactory& workloadFactory,
10432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10433{
10434 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10435 workloadFactory,
10436 memoryManager,
10437 false,
10438 armnn::DataLayout::NHWC);
10439}
10440
10441LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
10442 armnn::IWorkloadFactory& workloadFactory,
10443 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10444{
10445 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10446 workloadFactory,
10447 memoryManager,
10448 false,
10449 armnn::DataLayout::NCHW);
10450}
10451
10452LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
10453 armnn::IWorkloadFactory& workloadFactory,
10454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10455{
10456 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10457 workloadFactory,
10458 memoryManager,
10459 false,
10460 armnn::DataLayout::NHWC);
10461}
10462
10463// Padded biased
10464LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
10465 armnn::IWorkloadFactory& workloadFactory,
10466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10467{
10468 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10469 workloadFactory,
10470 memoryManager,
10471 true,
10472 armnn::DataLayout::NCHW);
10473}
10474
10475LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10476 armnn::IWorkloadFactory& workloadFactory,
10477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10478{
10479 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10480 workloadFactory,
10481 memoryManager,
10482 true,
10483 armnn::DataLayout::NHWC);
10484}
10485
10486LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10487 armnn::IWorkloadFactory& workloadFactory,
10488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10489{
10490 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10491 workloadFactory,
10492 memoryManager,
10493 true,
10494 armnn::DataLayout::NCHW);
10495}
10496
10497LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10498 armnn::IWorkloadFactory& workloadFactory,
10499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10500{
10501 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10502 workloadFactory,
10503 memoryManager,
10504 true,
10505 armnn::DataLayout::NHWC);
10506}
10507
10508LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10509 armnn::IWorkloadFactory& workloadFactory,
10510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10511{
10512 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10513 workloadFactory,
10514 memoryManager,
10515 true,
10516 armnn::DataLayout::NCHW);
10517}
10518
10519LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10520 armnn::IWorkloadFactory& workloadFactory,
10521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10522{
10523 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10524 workloadFactory,
10525 memoryManager,
10526 true,
10527 armnn::DataLayout::NHWC);
10528}
10529
10530// Padded unbiased
10531LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10532 armnn::IWorkloadFactory& workloadFactory,
10533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10534{
10535 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10536 workloadFactory,
10537 memoryManager,
10538 false,
10539 armnn::DataLayout::NCHW);
10540}
10541
10542LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10543 armnn::IWorkloadFactory& workloadFactory,
10544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10545{
10546 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10547 workloadFactory,
10548 memoryManager,
10549 false,
10550 armnn::DataLayout::NHWC);
10551}
10552
10553LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10554 armnn::IWorkloadFactory& workloadFactory,
10555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10556{
10557 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10558 workloadFactory,
10559 memoryManager,
10560 false,
10561 armnn::DataLayout::NCHW);
10562}
10563
10564LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10565 armnn::IWorkloadFactory& workloadFactory,
10566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10567{
10568 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10569 workloadFactory,
10570 memoryManager,
10571 false,
10572 armnn::DataLayout::NHWC);
10573}
10574
10575LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10576 armnn::IWorkloadFactory& workloadFactory,
10577 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10578{
10579 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10580 workloadFactory,
10581 memoryManager,
10582 false,
10583 armnn::DataLayout::NCHW);
10584}
10585
10586LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10587 armnn::IWorkloadFactory& workloadFactory,
10588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10589{
10590 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10591 workloadFactory,
10592 memoryManager,
10593 false,
10594 armnn::DataLayout::NHWC);
10595}
10596
10597// Strided biased
10598LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10599 armnn::IWorkloadFactory& workloadFactory,
10600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10601{
10602 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10603 workloadFactory,
10604 memoryManager,
10605 true,
10606 armnn::DataLayout::NCHW);
10607}
10608
10609LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10610 armnn::IWorkloadFactory& workloadFactory,
10611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10612{
10613 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10614 workloadFactory,
10615 memoryManager,
10616 true,
10617 armnn::DataLayout::NHWC);
10618}
10619
10620LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10621 armnn::IWorkloadFactory& workloadFactory,
10622 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10623{
10624 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10625 workloadFactory,
10626 memoryManager,
10627 true,
10628 armnn::DataLayout::NCHW);
10629}
10630
10631LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10632 armnn::IWorkloadFactory& workloadFactory,
10633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10634{
10635 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10636 workloadFactory,
10637 memoryManager,
10638 true,
10639 armnn::DataLayout::NHWC);
10640}
10641
10642LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10643 armnn::IWorkloadFactory& workloadFactory,
10644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10645{
10646 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10647 workloadFactory,
10648 memoryManager,
10649 true,
10650 armnn::DataLayout::NCHW);
10651}
10652
10653LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10654 armnn::IWorkloadFactory& workloadFactory,
10655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10656{
10657 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10658 workloadFactory,
10659 memoryManager,
10660 true,
10661 armnn::DataLayout::NHWC);
10662}
10663
10664// Strided unbiased
10665LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10666 armnn::IWorkloadFactory& workloadFactory,
10667 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10668{
10669 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10670 workloadFactory,
10671 memoryManager,
10672 false,
10673 armnn::DataLayout::NCHW);
10674}
10675
10676LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10677 armnn::IWorkloadFactory& workloadFactory,
10678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10679{
10680 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10681 workloadFactory,
10682 memoryManager,
10683 false,
10684 armnn::DataLayout::NHWC);
10685}
10686
10687LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10688 armnn::IWorkloadFactory& workloadFactory,
10689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10690{
10691 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10692 workloadFactory,
10693 memoryManager,
10694 false,
10695 armnn::DataLayout::NCHW);
10696}
10697
10698LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10699 armnn::IWorkloadFactory& workloadFactory,
10700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10701{
10702 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10703 workloadFactory,
10704 memoryManager,
10705 false,
10706 armnn::DataLayout::NHWC);
10707}
10708
10709LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10710 armnn::IWorkloadFactory& workloadFactory,
10711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10712{
10713 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10714 workloadFactory,
10715 memoryManager,
10716 false,
10717 armnn::DataLayout::NCHW);
10718}
10719
10720LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10721 armnn::IWorkloadFactory& workloadFactory,
10722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10723{
10724 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10725 workloadFactory,
10726 memoryManager,
10727 false,
10728 armnn::DataLayout::NHWC);
James Conroy9c3cae82019-08-01 16:01:48 +010010729}