blob: f996edad65e90e22b60f66b203924576b8cddb64 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Data is laid out channel-major: three consecutive 8x16 planes.
//   Plane 0: 0.5f everywhere except an all-zero second row.
//   Plane 1: zeros with a vertical stripe of 1s in the third column.
//   Plane 2: -1 everywhere.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests.
// Channel 0 gets no bias, channel 1 gets +2 (values are pre-quantization floats).
static std::vector<float> Bias2({0, 2});
79
// Fixture data for the rank-3 softmax tests: a {1, 8, 1} input tensor and the
// expected softmax result for it.
struct Simple3dSoftmaxOutputData
{
    // Expected softmax probabilities for inputData below.
    const std::vector<float> outputData =
    {
        0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
        0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
    };

    const armnn::TensorShape inputShape{ 1, 8, 1 };

    // 8 raw input values; only two are non-zero, so two output entries dominate.
    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f,
    };
};
96
// Fixture data for the rank-4 softmax tests: same 8 values as the rank-3
// fixture, reshaped to {1, 8, 1, 1}.
struct Simple4dSoftmaxData
{
    const armnn::TensorShape inputShape{ 1, 8, 1, 1 };

    // Expected softmax probabilities for inputData below.
    const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
                                            0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
    const std::vector<float> inputData =
    {
        0.f, 1.f, 0.f, 0.f,
        .5f, 0.f, 0.f, 0.f
    };
};
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000111template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +0100112boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +0000113{
114 if(biasEnabled)
115 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +0100117 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +0000118 return bias;
119 }
120 else
121 {
122 return boost::multi_array<T, 1>();
123 }
124}
125
// Runs a Conv2d of a 2-element batch of 3-channel 3x5 kernels over the shared
// 3-channel 16x8 input image and compares against a precomputed reference,
// for the given data type, quantization parameters, bias setting and layout.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // The bias tensor is quantized with scale qScale * qScale (see GetBias2).
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
209
// Runs a Conv2d of a 2-element batch of 3-channel 3x3 kernels over the shared
// 3-channel 16x8 input image and compares against a precomputed reference.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // The bias tensor is quantized with scale qScale * qScale (see GetBias2).
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
286
// Runs a bias-free Conv2d of a single 1-channel 3x3 kernel over a 1-channel
// 3x4 image using the NHWC test implementation.
// NOTE(review): biasEnabled is accepted but never used — the convolution is
// always run with an empty bias tensor; confirm against callers.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use a single-batch 1-channel 3x4 image (shape is NHWC: {1, 3, 4, 1}).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                       1, 5, 2, 3,
                                                       8, 7, 3, 6,
                                                       3, 3, 9, 1
                                                      });


    // Use a single 1-channel 3x3 kernel with an all-zero middle row.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                   });

    // Expected output is 1 batch of a 1-channel 3x4 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
338
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000339template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly7332ed82018-12-20 17:03:06 +0000340LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
341 armnn::IWorkloadFactory& workloadFactory,
342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
343 float qScale,
344 int32_t qOffset,
345 bool biasEnabled,
346 const armnn::DataLayout& dataLayout)
347{
348 // Input is a single-batch, 1 channel, 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000349 armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000350 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
351 {
352 1, 5, 2, 3, 5,
353 8, 7, 3, 6, 3,
354 3, 3, 9, 1, 9,
355 4, 1, 8, 1, 3,
356 6, 8, 1, 9, 2
357 });
358
359 // Use a 3x3 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000360 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000361 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
362 {
363 4, 5, 6,
364 0, 0, 0,
365 3, 2, 1
366 });
367
368 // Expected output is a single-batch, 1 channel, 3x3 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000369 armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
Mike Kelly7332ed82018-12-20 17:03:06 +0000370
371 const std::vector<T> outputData =
372 {
373 23, 33, 24,
374 91, 99, 48,
375 26, 50, 19
376 };
377
378 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
379
380 uint32_t padLeft = 1;
381 uint32_t padTop = 1;
382 uint32_t padRight = 1;
383 uint32_t padBottom = 1;
384 uint32_t strideX = 2;
385 uint32_t strideY = 2;
386
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000387 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
388 workloadFactory,
389 memoryManager,
390 input,
391 kernel,
392 boost::multi_array<T, 1>(),
393 expectedOutput,
394 dataLayout,
395 qScale,
396 qOffset,
397 padLeft,
398 padTop,
399 padRight,
400 padBottom,
401 strideX,
402 strideY);
Mike Kelly7332ed82018-12-20 17:03:06 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
412 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000413}
414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000415LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
416 armnn::IWorkloadFactory& workloadFactory,
417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000419 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000421 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
422 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000423}
424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000425LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
426 armnn::IWorkloadFactory& workloadFactory,
427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000431 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
432 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
441 workloadFactory,
442 memoryManager,
443 0.f,
444 0,
445 biasEnabled,
446 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100447}
448
Mike Kelly7332ed82018-12-20 17:03:06 +0000449LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
450 armnn::IWorkloadFactory& workloadFactory,
451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
452 bool biasEnabled,
453 const armnn::DataLayout layout)
454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000455 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
456 workloadFactory,
457 memoryManager,
458 0.f,
459 0,
460 biasEnabled,
461 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000462}
463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000464LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
465 armnn::IWorkloadFactory& workloadFactory,
466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
467 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000468 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000469{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000470 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
471 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000472}
473
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100474LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
475 armnn::IWorkloadFactory& workloadFactory,
476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
477 bool biasEnabled,
478 const armnn::DataLayout layout)
479{
480return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
481 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
482}
483
484LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
485 armnn::IWorkloadFactory& workloadFactory,
486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
487 bool biasEnabled,
488 const armnn::DataLayout layout)
489{
490 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
491 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
492}
493
// Checks Conv2d with asymmetric padding that is larger than half the kernel
// size (pad right 3 / bottom 4 against a 2x2 kernel), so several output
// positions read only padding and must come out as zero.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    // No bias; padding is deliberately asymmetric.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
559 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000560LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
561 armnn::IWorkloadFactory& workloadFactory,
562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000563 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000564 float qScale,
565 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000566{
telsoa01c577f2c2018-08-31 09:22:23 +0100567 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000568 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000569 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
570 QuantizedVector<T>(qScale, qOffset, {
571 11,21,31,41,51,
572 12,22,32,42,52,
573 13,23,33,43,53,
574 14,24,34,44,54,
575 15,25,35,45,55,
576 })));
577
telsoa01c577f2c2018-08-31 09:22:23 +0100578 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000579 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000580 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
581 QuantizedVector<T>(qScale, qOffset, {
582 -11,-21,-31,-41,
583 -12,-22,-32,-42,
584 -13,-23,-33,-43,
585 -14,-24,-34,-44,
586 })));
587
telsoa01c577f2c2018-08-31 09:22:23 +0100588 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000589 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000590 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
591 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
592 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000593 -7140, -10580, -13940, -9300, -5230,
594 -9590, -14120, -18520, -12290, -6860,
595 -9980, -14560, -18960, -12560, -7000,
596 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100597 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000598 })));
599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000600 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
601 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000602 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000603 input,
604 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100605 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000606 expectedOutput,
607 qScale,
608 qOffset,
narpra015f703182018-10-26 16:24:58 +0100609 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100610 1, // Padding left.
611 1, // Padding top.
612 2, // Padding right.
613 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100614}
615
Teresa Charlinedeeb162019-06-14 11:09:19 +0100616LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
617 armnn::IWorkloadFactory& workloadFactory,
618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
619 armnn::DataLayout layout)
620{
621 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
622 workloadFactory, memoryManager, layout, 0.0f, 0);
623}
624
625LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
626 armnn::IWorkloadFactory& workloadFactory,
627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
628 armnn::DataLayout layout)
629{
630 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
631 <armnn::DataType::Float32, armnn::DataType::Float32>(
632 workloadFactory, memoryManager, layout, 0.0f, 0);
633}
634
635LayerTestResult<float, 4> Convolution1dTest(
636 armnn::IWorkloadFactory& workloadFactory,
637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
638 bool biasEnabled)
639{
640 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
641 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
642}
643
644LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
645 armnn::IWorkloadFactory& workloadFactory,
646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
647 bool biasEnabled)
648{
649 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
650 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
651}
652
653LayerTestResult<float,4> CompareConvolution2dTest(
654 armnn::IWorkloadFactory& workloadFactory,
655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
656 armnn::IWorkloadFactory& refWorkloadFactory)
657{
658 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
659 workloadFactory, memoryManager, refWorkloadFactory);
660}
661
662template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
663LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
664 armnn::IWorkloadFactory& workloadFactory,
665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
666 const std::vector<float>& inputNoQuantizedValues,
667 armnn::TensorInfo& inputTensorInfo,
668 const std::vector<float>& kernelNoQuantizedValues,
669 armnn::TensorInfo& kernelTensorInfo,
670 const std::vector<float>& outputExpectedNoQuantizedValues,
671 armnn::TensorInfo& outputTensorInfo,
672 uint32_t dilationX,
673 uint32_t dilationY,
674 armnn::DataLayout layout = armnn::DataLayout::NCHW,
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100675 uint32_t padLeft = 0,
676 uint32_t padTop = 0,
677 uint32_t padRight = 0,
678 uint32_t padBottom = 0,
679 uint32_t strideX = 1,
680 uint32_t strideY = 1,
Teresa Charlinedeeb162019-06-14 11:09:19 +0100681 bool biasEnabled = false
682)
683{
684 float qScale;
685 int32_t qOffset;
686 switch (ArmnnType)
687 {
688 case armnn::DataType::QuantisedAsymm8:
689 {
690 qScale = 0.1f;
691 qOffset = 128;
692 break;
693 }
694 case armnn::DataType::QuantisedSymm16:
695 {
696 qScale = 0.1f;
697 qOffset = 0;
698 break;
699 }
700 case armnn::DataType::Float32:
701 default:
702 {
703 qScale = 0.f;
704 qOffset = 0;
705 break;
706 }
707 }
708
709 inputTensorInfo.SetQuantizationScale(qScale);
710 inputTensorInfo.SetQuantizationOffset(qOffset);
711 kernelTensorInfo.SetQuantizationScale(qScale);
712 kernelTensorInfo.SetQuantizationOffset(qOffset);
713 outputTensorInfo.SetQuantizationScale(qScale);
714 outputTensorInfo.SetQuantizationOffset(qOffset);
715
716 auto input = MakeTensor<T, 4>(inputTensorInfo,
717 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
718 inputTensorInfo.GetQuantizationOffset(),
719 inputNoQuantizedValues)));
720 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
721 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
722 kernelTensorInfo.GetQuantizationOffset(),
723 kernelNoQuantizedValues)));
724 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
725 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
726 outputTensorInfo.GetQuantizationOffset(),
727 outputExpectedNoQuantizedValues)));
728
Teresa Charlinedeeb162019-06-14 11:09:19 +0100729 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
730 workloadFactory,
731 memoryManager,
732 input,
733 kernel,
734 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
735 expectedOutput,
736 qScale,
737 qOffset,
738 layout,
739 padLeft,
740 padTop,
741 padRight,
742 padBottom,
743 strideX,
744 strideY,
745 dilationX,
746 dilationY);
747}
748
// Convolution of a 1-channel 10x10 input with a 3x3 kernel dilated by 3x3,
// no padding, stride 1. The dilated kernel covers a 7x7 receptive field, so
// the output is 4x4. Reference data is given in float and quantised by
// Convolution2d3x3DilationTestCommon according to ArmnnType.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    // A 3x3 block of ones at rows 3-5, columns 4-6; zeros elsewhere.
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
804
// Same scenario as Convolution2d3x3Dilation3x3Test but with two identical
// input channels and a single-output-channel kernel spanning both: the
// convolution sums over the channels, so each expected value is doubled.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    // Two identical channels, each with a 3x3 block of ones at rows 3-5, columns 4-6.
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel reading from two input channels (same 3x3 weights each).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    // Values are twice those of the single-channel test because the two channels are summed.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
875
// Convolution of an all-ones 10x10 input with a 2x2 kernel dilated by 2x2,
// 1 element of padding on each edge, and stride 3.
// NOTE(review): the function name says "Padding2x2" but each side is padded
// by 1 (2 in total per dimension) — confirm the name is intentional.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Dilating the 2x2 kernel by 2 makes it behave like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3.
    // With 1 element of padding per side (2 in total per dimension) and stride 3 the output
    // is 4x4: (I - K_dilated + P_total)/S + 1 => (10 - 3 + 2)/3 + 1 = 4
    // where dilation d = 2, kernel size K = 2, input size I = 10, total padding P_total = 2, stride S = 3.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4,  7,  7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2,  3,  3, 1
    };
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        2,  // dilationX
        2,  // dilationY
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        3,  // strideX
        3,  // strideY
        biasEnabled
        );
}
942
// Explicit instantiations of the Convolution2d dilation test templates for the
// data-type combinations used by the backend unit tests: Float32 (Float32 bias),
// QuantisedAsymm8 (Signed32 bias) and QuantisedSymm16 (Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);
1005
// Depthwise convolution with asymmetric padding (1 left/top, 2 right/bottom)
// and unit strides over a 1-batch, 2-channel 5x5 input with a depth
// multiplier of 1. The float reference data is quantised with the caller's
// qScale/qOffset before being handed to DepthwiseConvolution2dAsymmetricTestImpl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
1086
// NHWC variant of the asymmetric-padding depthwise test: same input, kernel
// and expected values as DepthwiseConvolution2dAsymmetricTestCommon, but run
// with DataLayout::NHWC.
// NOTE(review): the tensor dimensions are written here in NCHW order
// ({ 1, 2, 5, 5 }); presumably DepthwiseConvolution2dTestImpl permutes them
// for the NHWC layout — confirm against the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
1164
// Depthwise convolution in NHWC layout with a 3x3 kernel dilated by 3x3 over
// a 1-channel 9x9 input, no padding, unit strides. The 3x3 block of ones in
// the input centre lines up with the dilated taps so every output element is 5.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    // A 3x3 block of ones at rows 3-5, columns 3-5; zeros elsewhere.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1238
Teresa Charlin20b1f882019-06-19 09:34:37 +01001239
// Shared driver for the dilated depthwise-convolution tests: selects
// quantisation parameters for ArmnnType, quantises the float reference data
// into T and runs DepthwiseConvolution2dTestImpl with zero padding and unit
// strides. Mirrors Convolution2d3x3DilationTestCommon above — keep the two
// in sync if quantisation parameters change.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    bool biasEnabled = false)
{
    // Per-type quantisation parameters; Float32 (and the default case) is unquantised.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Input, kernel and output share the same quantisation info.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantise the float reference data into the tensor element type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    // This common path always runs without padding at unit stride; only the
    // dilation is configurable.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1326
// Depthwise convolution of a 1-channel 10x10 input with a 3x3 kernel dilated
// by 3x3 (no padding, stride 1). The dilated kernel covers 7x7, so the output
// is 4x4. Same data as the Convolution2d variant above.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    // A 3x3 block of ones at rows 3-5, columns 4-6; zeros elsewhere.
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1382
// Two-channel version of DepthwiseConvolution2d3x3Dilation3x3Test. Unlike the
// Convolution2d variant (which sums the channels), depthwise keeps each
// channel separate, so the output is 2x4x4 with the single-channel pattern
// repeated per channel.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    // Two identical channels, each with a 3x3 block of ones at rows 3-5, columns 4-6.
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,  // dilationX
        3,  // dilationY
        layout,
        biasEnabled);
}
1458
1459
// Explicit instantiations of the depthwise dilation test templates for the
// data-type combinations used by the backend unit tests: Float32 (Float32
// bias), QuantisedAsymm8 (Signed32 bias) and QuantisedSymm16 (Signed32 bias).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1501
// Float32 depthwise convolution entry point: unquantised (scale 0, offset 0),
// delegating to DepthwiseConvolution2dTestImpl.
LayerTestResult<float, 4> DepthwiseConvolution2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
1511
// Float32 depthwise convolution in NHWC layout: unquantised, delegating to
// DepthwiseConvolution2dNhwcTestCommon (which fixes the layout itself).
LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled)
{
    return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
1520
// Float32 depthwise convolution with depth multiplier 1: unquantised,
// delegating to DepthwiseConvolution2dDepthMul1TestImpl.
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
1530
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001531LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534{
1535 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538 std::vector<float> kernelData;
1539 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540 for (unsigned int i = 0; i < 64; ++i)
1541 {
1542 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543 }
1544 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
1547 std::vector<float> expectedOutputData(64, 0.f);
1548 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552 workloadFactory,
1553 memoryManager,
1554 input,
1555 kernel,
1556 boost::multi_array<float, 1>(),
1557 expectedOutput,
1558 0.f,
1559 0,
1560 armnn::DataLayout::NCHW);
1561}
1562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001563LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001567 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001569 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001570 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001571}
1572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001573LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574 armnn::IWorkloadFactory& workloadFactory,
1575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001577 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001578{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001579 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001580 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001581}
1582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001583LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584 armnn::IWorkloadFactory& workloadFactory,
1585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001587 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001589 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001590 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001591}
1592
Bruno Goncalves22972f02019-04-26 21:03:24 -03001593LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594 armnn::IWorkloadFactory& workloadFactory,
1595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596{
1597 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001598 workloadFactory,
1599 memoryManager,
1600 0.f,
1601 0,
1602 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001603}
1604
Ruomei Yan88d44b82019-05-23 14:29:06 +01001605LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 bool biasEnabled,
1609 const armnn::DataLayout layout)
1610{
1611 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613}
1614
1615LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618 bool biasEnabled,
1619 const armnn::DataLayout layout)
1620{
1621 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623}
1624
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001625LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001629 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001631 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001633}
1634
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001635LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636 armnn::IWorkloadFactory& workloadFactory,
1637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638 armnn::IWorkloadFactory& refWorkloadFactory,
1639 const armnn::DataLayout layout)
1640{
1641 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642 workloadFactory, memoryManager, refWorkloadFactory, layout);
1643}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001644
1645LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646 armnn::IWorkloadFactory& workloadFactory,
1647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001648{
1649 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001651 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
1658 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001660 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001666{
1667 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001669 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001670}
1671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001672LayerTestResult<float,2> SimpleSoftmaxTest(
1673 armnn::IWorkloadFactory& workloadFactory,
1674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001677 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001678}
1679
Francis Murtagh07f21212019-07-23 09:50:50 +01001680LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681 armnn::IWorkloadFactory& workloadFactory,
1682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683 float beta,
1684 int axis)
1685{
1686 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687}
1688
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001689LayerTestResult<float,3> Simple3dSoftmaxTest(
1690 armnn::IWorkloadFactory& workloadFactory,
1691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692 float beta)
1693{
Francis Murtagh07f21212019-07-23 09:50:50 +01001694 Simple3dSoftmaxOutputData data;
1695 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696 data.inputShape, data.outputData, data.inputData);
1697}
1698
LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    // Softmax over a chosen axis of a rank-3 tensor. Each supported axis
    // (negative values index from the back, TensorFlow-style) gets an input
    // permuted so that the softmax axis always holds the same five values,
    // hence the identical expected probabilities in every case.
    // NOTE(review): no default case - an axis outside [-3, 2] leaves all
    // three locals empty; assumed unreachable from the test callers.
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        case -3:
        case 0:
        {
            // Softmax along the outermost dimension (size 5).
            inputShape = {5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,

                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 1:
        {
            // Softmax along the middle dimension (size 5).
            inputShape = {2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,

                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 2:
        {
            // Softmax along the innermost dimension (size 5).
            inputShape = {2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
                                                             inputShape, outputData, inputData, axis);
}
1792
1793LayerTestResult<float,4> Simple4dSoftmaxTest(
1794 armnn::IWorkloadFactory& workloadFactory,
1795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796 float beta)
1797{
Francis Murtagh07f21212019-07-23 09:50:50 +01001798 Simple4dSoftmaxData data;
1799 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800 data.outputData, data.inputData);
1801}
1802
LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    // Rank-4 analogue of Simple3dAxisSoftmaxTest: for each supported axis
    // (negative values index from the back) the input is permuted so the
    // softmax axis holds the same five values, giving identical expected
    // probabilities in every case.
    // NOTE(review): no default case - an axis outside [-4, 3] leaves all
    // three locals empty; assumed unreachable from the test callers.
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        case -4:
        case 0:
        {
            // Softmax along the outermost dimension (size 5).
            inputShape = {5, 2, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
                16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
                14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.643914213228014f,
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        case -3:
        case 1:
        {
            // Softmax along the second dimension (size 5).
            inputShape = {2, 5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,


                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -2:
        case 2:
        {
            // Softmax along the third dimension (size 5).
            inputShape = {2, 2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        case -1:
        case 3:
        {
            // Softmax along the innermost dimension (size 5).
            inputShape = {2, 2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
                                                             outputData, inputData, axis);
}
1960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001961LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962 armnn::IWorkloadFactory& workloadFactory,
1963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001966 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001967}
1968
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001969LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970 armnn::IWorkloadFactory& workloadFactory,
1971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972 float beta)
1973{
Francis Murtagh07f21212019-07-23 09:50:50 +01001974 Simple3dSoftmaxOutputData data;
1975 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001977}
1978
1979LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982 float beta)
1983{
Francis Murtagh07f21212019-07-23 09:50:50 +01001984 Simple4dSoftmaxData data;
1985
1986 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001988}
1989
nikraj01248683f2019-05-29 16:46:50 +01001990LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991 armnn::IWorkloadFactory& workloadFactory,
1992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993 float beta)
1994{
1995 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996}
1997
1998LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999 armnn::IWorkloadFactory& workloadFactory,
2000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001 float beta)
2002{
Francis Murtagh07f21212019-07-23 09:50:50 +01002003 Simple3dSoftmaxOutputData data;
2004 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002006}
2007
2008LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009 armnn::IWorkloadFactory& workloadFactory,
2010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011 float beta)
2012{
Francis Murtagh07f21212019-07-23 09:50:50 +01002013 Simple4dSoftmaxData data;
2014
2015 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002017}
2018
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002019LayerTestResult<float,4> CompareNormalizationTest(
2020 armnn::IWorkloadFactory& workloadFactory,
2021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022 armnn::IWorkloadFactory& refWorkloadFactory,
2023 armnn::NormalizationAlgorithmChannel normChannel,
2024 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00002025{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002026 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00002027}
2028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002029LayerTestResult<float,2> CompareSoftmaxTest(
2030 armnn::IWorkloadFactory& workloadFactory,
2031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002032 armnn::IWorkloadFactory& refWorkloadFactory,
2033 float beta)
2034{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002035 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002037}
2038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 armnn::IWorkloadFactory& refWorkloadFactory,
2043 float beta)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002047}
2048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002049std::vector<LayerTestResult<float,3>> SplitterTest(
2050 armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002052{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002053 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00002054}
2055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002056std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057 armnn::IWorkloadFactory& workloadFactory,
2058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002060 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002061}
2062
Ruomei Yan25339c32019-05-28 16:48:20 +01002063std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064 armnn::IWorkloadFactory& workloadFactory,
2065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066{
2067 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068}
2069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002070LayerTestResult<float, 3> CopyViaSplitterTest(
2071 armnn::IWorkloadFactory& workloadFactory,
2072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002074 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002075}
2076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002077LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078 armnn::IWorkloadFactory& workloadFactory,
2079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002081 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002082}
2083
Ruomei Yan25339c32019-05-28 16:48:20 +01002084LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085 armnn::IWorkloadFactory& workloadFactory,
2086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087{
2088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089}
2090
Jan Eilers38e05bd2019-06-26 13:10:09 +01002091void LstmUtilsZeroVectorTest()
2092{
2093 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095 {2., 3., 3., 4.}));
2096
2097 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098 {0., 0., 0., 0.}));
2099
2100 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101}
2102
2103void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104{
2105 uint32_t batchSize = 2;
2106 uint32_t vecSize = 4;
2107 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
2110 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
2111
2112 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2113 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
2114 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
2115
2116 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117 vecSize, batchSize, expectedOutput);
2118}
2119
2120void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121{
2122 uint32_t batchSize = 2;
2123 uint32_t vecSize = 4;
2124 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2127 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2128
2129 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2131 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2132
2133 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134 vecSize, batchSize, expectedOutput);
2135}
2136
2137void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138{
2139 uint32_t batchSize = 2;
2140 uint32_t vecSize = 4;
2141 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2144 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
2145
2146 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2148 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
2149
2150 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151 vecSize, batchSize, expectedOutput);
2152}
2153
2154
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    // Element-wise product of one vector against every row of a batch:
    // the four batch rows are +v, -v, alternating-sign v, and the opposite
    // alternation, so each expected value is +/-(v[i]^2).
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
            { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
              /* batch 1 */
              -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
              -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
              -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
              /* batch 2 */
              1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
              11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
              21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
              /* batch 3 */
              -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
              -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
              -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expected output = vector * batchVector, element-wise within each batch
    // (there is no accumulate term in the plain cwise-product variant).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
              59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
              172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
              368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
              637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
              /* batch 1 */
              -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
              -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
              -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
              -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
              -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
              /* batch 2 */
              1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
              59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
              172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
              368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
              637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
              /* batch 3 */
              -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
              -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
              -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
              -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
              -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
2214
2215
2216void LstmUtilsVectorBatchVectorAddTest()
2217{
2218 uint32_t batchSize = 2;
2219 uint32_t vecSize = 3;
2220 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222 { 0.0f, -0.5f, 1.0f}));
2223
2224 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226 { 1.0f, 2.0f, 3.0f, //batch 0
2227 4.0f, 5.0f, 6.0f})); //batch 1
2228
2229 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230 { 1.0f, 1.5f, 4.0f,
2231 4.0f, 4.5f, 7.0f}));
2232
2233 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234 vecSize, batchSize, expectedOutput);
2235}
2236
2237
telsoa01c577f2c2018-08-31 09:22:23 +01002238LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002239 armnn::IWorkloadFactory& workloadFactory,
2240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002241{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002242 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002243 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2244 { 2., 3., 3., 4. }));
2245
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002246 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002247 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2248 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2249 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002250 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002251 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002252}
2253
2254LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Conor Kennedyb9971c92019-05-07 07:14:23 +01002255 armnn::IWorkloadFactory& workloadFactory,
2256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002257{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002258 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002259 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2260 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2261 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
2262
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002263 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002264 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2265 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2266 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2267 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2268 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2269 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2270 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
2271 0.02168f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01002272 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
2273 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002274}
2275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002276LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
2277 armnn::IWorkloadFactory& workloadFactory,
2278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01002279{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002280 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002281 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2282 {2., 3., 3., 4.}));
2283
2284
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002285 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01002286 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2287 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2288 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
2289
Conor Kennedyb9971c92019-05-07 07:14:23 +01002290 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002291 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01002292}
2293
Jan Eilers38e05bd2019-06-26 13:10:09 +01002294
2295LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
2296 armnn::IWorkloadFactory& workloadFactory,
2297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298{
2299 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
2300 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2301 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
2302 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
2303
2304 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2305 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2306 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
2307 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
2308 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2309 workloadFactory, memoryManager, input, expectedOutput);
2310}
2311
2312
Conor Kennedyb9971c92019-05-07 07:14:23 +01002313LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
2314 armnn::IWorkloadFactory& workloadFactory,
2315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2316{
2317 const float qScale = 1.0f;
2318 const int32_t qOffset = 0;
2319
2320 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2321 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2322
2323 armnn::TensorInfo inputDesc({2, 2}, datatype);
2324 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2325 std::vector<float>{2., 3., 3., 4.}));
2326
2327 armnn::TensorInfo outputDesc({2, 4}, datatype);
2328 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2329 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2330 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2331
2332 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2333 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2334
2335}
2336
2337LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2338 armnn::IWorkloadFactory& workloadFactory,
2339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2340{
2341 const float qScale = 1.0f;
2342 const int32_t qOffset = 0;
2343
2344 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2345 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2346
2347 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2348 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2349 std::vector<float>({ 2., 3., 3., 4. })));
2350
2351 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2352 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2353 qOffset, std::vector<float>(
2354 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2355 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2356
2357 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2358 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2359}
2360
2361LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2362 armnn::IWorkloadFactory& workloadFactory,
2363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2364{
2365 const float qScale = 2.0f;
2366 const int32_t qOffset = 0;
2367
2368 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2369 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2370
2371 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2372 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2373 qOffset, std::vector<float>(
2374 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2375 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2376
2377 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2378 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2379 qOffset, std::vector<float>(
2380 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2381 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2382 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2383 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2384 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2385 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
2386
2387 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2388 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2389}
2390
2391LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2392 armnn::IWorkloadFactory& workloadFactory,
2393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2394{
2395 const float qScale = 1.0f;
2396 const int32_t qOffset = 0;
2397
2398 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2399
2400 armnn::TensorInfo inputDesc({2, 2}, datatype);
2401 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2402 qOffset, std::vector<float>{2., 3., 3., 4.}));
2403
2404 armnn::TensorInfo outputDesc({2, 4}, datatype);
2405 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2406 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2407 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2408
2409 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2410 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2411}
2412
// Concatenates a [2,6,3] and a [1,6,3] Float32 tensor along the channel
// dimension into a [3,6,3] output, and checks the result against a
// pre-computed expected tensor. The view origins place input1 at channel 0
// and input2 at channel 2 (immediately after input1's two channels).
// NOTE: the ordering below is significant - sub-tensor handles are created
// from the output handle, inputs are allocated before data is copied in, and
// PostAllocationConfigure() runs before Execute().
// memoryManager is unused in this test.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,

        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    // First input: two channels, values 1..36.
    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,
    })
    );

    // Second input: one channel, values 37..54.
    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
    {
        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are supported, the input handles are views into the
    // output handle at the window origins; otherwise they are standalone handles.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocate all handles before copying input data into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // Configuration that depends on allocated memory must happen before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
2535
// Element-wise addition of two identically-shaped [2,2,2,3] Float32 tensors,
// checked against a pre-computed expected sum (input1 + input2).
// memoryManager is unused in this test; handles come from the workload factory.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same NCHW shape.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    // Expected output is the element-wise sum of input1 and input2.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate handles before copying input data into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Configuration that depends on allocated memory must happen before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2627
// Broadcast addition: adds a [1,3,2,1] tensor to a [1,1,2,3] tensor, producing
// a [1,3,2,3] output. Each side is broadcast along the dimensions where its
// extent is 1. For quantized types, qScale/qOffset are applied to all three
// tensors; for float types the quantization parameters are set but unused by
// QuantizedVector (no-op for non-quantized T — TODO confirm against helper).
// memoryManager is unused in this implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply when T is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected result of broadcasting input1 across width and input2 across channels.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate handles before copying input data into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Configuration that depends on allocated memory must happen before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2706
// Broadcast addition of a single scalar element: adds a [1,1,1,1] tensor
// (value 0.5) to every element of a [1,3,2,3] tensor. For quantized types,
// qScale/qOffset are applied to all three tensors.
// memoryManager is unused in this implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization parameters only apply when T is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single element that is broadcast-added to every element of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected result: input1 with 0.5 added to every element.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate handles before copying input data into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    // Configuration that depends on allocated memory must happen before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2780
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002781LayerTestResult<float, 4> AdditionBroadcastTest(
2782 armnn::IWorkloadFactory& workloadFactory,
2783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002784{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002785 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2786 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002787}
2788
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002789LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2790 armnn::IWorkloadFactory& workloadFactory,
2791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002792{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002793 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2794 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002795}
2796
Sadik Armagan2999a022019-04-09 14:20:12 +01002797LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2798 armnn::IWorkloadFactory& workloadFactory,
2799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2800{
2801 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2802 workloadFactory, memoryManager, 2.f, 0);
2803}
2804
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002805LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2806 armnn::IWorkloadFactory& workloadFactory,
2807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002808{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002809 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2810 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002811}
2812
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002813LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2814 armnn::IWorkloadFactory& workloadFactory,
2815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002816{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002817 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2818 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002819}
2820
Sadik Armagan2999a022019-04-09 14:20:12 +01002821LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2822 armnn::IWorkloadFactory& workloadFactory,
2823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2824{
2825 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2826 workloadFactory, memoryManager, 0.1333333f, 0);
2827}
2828
// Runs the same addition workload on two factories (the backend under test and
// a reference factory) with identical random inputs, and stores the backend
// result as 'output' and the reference result as 'outputExpected' so the
// caller can compare them. memoryManager is unused in this test.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Parallel set of handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor/info and rebind the handles to the reference ones.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    // Allocate all handles before copying input data into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive byte-identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    // Configuration that depends on allocated memory must happen before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2898
surmeh01bceff2f2018-03-29 16:29:27 +01002899namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002900template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002901LayerTestResult<T, 4> DivisionTestHelper(
2902 armnn::IWorkloadFactory& workloadFactory,
2903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2904 const unsigned int shape0[4],
2905 const std::vector<T>& values0,
2906 float scale0,
2907 int32_t offset0,
2908 const unsigned int shape1[4],
2909 const std::vector<T> & values1,
2910 float scale1,
2911 int32_t offset1,
2912 const unsigned int outShape[4],
2913 const std::vector<T> & outValues,
2914 float outScale,
2915 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002916{
Sadik Armagan2999a022019-04-09 14:20:12 +01002917 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2918 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2919 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002920
David Beck5cd01f32018-09-12 16:00:08 +01002921 inputTensorInfo0.SetQuantizationScale(scale0);
2922 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002923
David Beck5cd01f32018-09-12 16:00:08 +01002924 inputTensorInfo1.SetQuantizationScale(scale1);
2925 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002926
David Beck5cd01f32018-09-12 16:00:08 +01002927 outputTensorInfo.SetQuantizationScale(outScale);
2928 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002929
David Beck5cd01f32018-09-12 16:00:08 +01002930 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2931 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002932
David Beck5cd01f32018-09-12 16:00:08 +01002933 LayerTestResult<T, 4> result(outputTensorInfo);
2934 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002935
David Beck5cd01f32018-09-12 16:00:08 +01002936 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2937 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2938 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002939
David Beck5cd01f32018-09-12 16:00:08 +01002940 armnn::DivisionQueueDescriptor data;
2941 armnn::WorkloadInfo info;
2942 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2943 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2944 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002945
David Beck5cd01f32018-09-12 16:00:08 +01002946 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002947
David Beck5cd01f32018-09-12 16:00:08 +01002948 inputHandle0->Allocate();
2949 inputHandle1->Allocate();
2950 outputHandle->Allocate();
2951
2952 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2953 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2954
Derek Lambertif30f7d32019-04-09 10:25:02 +01002955 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002956 workload->Execute();
2957
2958 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2959
2960 return result;
2961}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002962} // anonymous namespace
2963
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002964LayerTestResult<float,4> DivisionByZeroTest(
2965 armnn::IWorkloadFactory& workloadFactory,
2966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002967{
2968 const unsigned int width = 2;
2969 const unsigned int height = 2;
2970 const unsigned int channelCount = 2;
2971 const unsigned int batchSize = 2;
2972
2973 unsigned int shape[] = { batchSize, channelCount, height, width };
2974
2975 std::vector<float> input0({
2976 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2977 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2978
2979 std::vector<float> input1({
2980 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2981 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2982
2983 std::vector<float> output({
2984 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2985 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2986
Sadik Armagan2999a022019-04-09 14:20:12 +01002987 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2988 memoryManager,
2989 shape, input0, 1.0f, 0,
2990 shape, input1, 1.0f, 0,
2991 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002992}
2993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002994LayerTestResult<float,4> DivisionTest(
2995 armnn::IWorkloadFactory& workloadFactory,
2996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002997{
2998 const unsigned int width = 2;
2999 const unsigned int height = 2;
3000 const unsigned int channelCount = 2;
3001 const unsigned int batchSize = 2;
3002
3003 unsigned int shape[] = { batchSize, channelCount, height, width };
3004
3005 std::vector<float> input0({
3006 2, 2, 2, 2, 3, 3, 3, 3,
3007 4, 4, 4, 4, 5, 5, 5, 5 });
3008
3009 std::vector<float> input1({
3010 1, 1, 1, 1, 2, 2, 2, 2,
3011 4, 4, 4, 4, 4, 4, 4, 4 });
3012
3013 std::vector<float> output({
3014 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
3015 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
3016
David Beck5cd01f32018-09-12 16:00:08 +01003017
Sadik Armagan2999a022019-04-09 14:20:12 +01003018 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3019 memoryManager,
3020 shape, input0, 1.0f, 0,
3021 shape, input1, 1.0f, 0,
3022 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003023}
3024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003025LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3026 armnn::IWorkloadFactory& workloadFactory,
3027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003028{
3029 unsigned int shape0[] = { 1, 2, 2, 2 };
3030 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3031
3032 unsigned int shape1[] = { 1, 1, 1, 1 };
3033 std::vector<float> input1({ 2 });
3034
3035 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3036
David Beck5cd01f32018-09-12 16:00:08 +01003037
Sadik Armagan2999a022019-04-09 14:20:12 +01003038 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3039 memoryManager,
3040 shape0, input0, 1.0f, 0,
3041 shape1, input1, 1.0f, 0,
3042 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003043}
3044
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003045LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3046 armnn::IWorkloadFactory& workloadFactory,
3047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003048{
3049 unsigned int shape0[] = { 1, 3, 3, 2 };
3050 std::vector<float> input0({
3051 1, 4, 3, 8, 5, 12,
3052 7, 16, 9, 20, 11, 24,
3053 13, 28, 15, 32, 17, 36});
3054
3055 unsigned int shape1[] = { 1, 1, 1, 2 };
3056 std::vector<float> input1({ 1, 2 });
3057
3058 std::vector<float> output({
3059 1, 2, 3, 4, 5, 6,
3060 7, 8, 9, 10, 11, 12,
3061 13, 14, 15, 16, 17, 18});
3062
Sadik Armagan2999a022019-04-09 14:20:12 +01003063 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3064 memoryManager,
3065 shape0, input0, 1.0f, 0,
3066 shape1, input1, 1.0f, 0,
3067 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003068}
3069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003070LayerTestResult<uint8_t,4> DivisionUint8Test(
3071 armnn::IWorkloadFactory& workloadFactory,
3072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003073{
3074 const unsigned int width = 2;
3075 const unsigned int height = 2;
3076 const unsigned int channelCount = 2;
3077 const unsigned int batchSize = 2;
3078
3079 unsigned int shape[] = { batchSize, channelCount, height, width };
3080
3081 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3082 4, 4, 4, 4, 5, 5, 5, 5 });
3083
3084 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3085 4, 4, 4, 4, 4, 4, 4, 4 });
3086
3087 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3088 4, 4, 4, 4, 5, 5, 5, 5});
3089
3090
Sadik Armagan2999a022019-04-09 14:20:12 +01003091 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3092 memoryManager,
3093 shape, input0, 1.0f, 0,
3094 shape, input1, 1.0f, 0,
3095 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003096}
3097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003098LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3099 armnn::IWorkloadFactory& workloadFactory,
3100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003101{
3102 unsigned int shape0[] = { 1, 2, 2, 2 };
3103 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3104
3105 unsigned int shape1[] = { 1, 1, 1, 1 };
3106 std::vector<uint8_t> input1({ 2 });
3107
3108 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3109
Sadik Armagan2999a022019-04-09 14:20:12 +01003110 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3111 memoryManager,
3112 shape0, input0, 1.0f, 0,
3113 shape1, input1, 1.0f, 0,
3114 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003115}
3116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003117LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3118 armnn::IWorkloadFactory& workloadFactory,
3119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003120{
3121 unsigned int shape0[] = { 1, 3, 3, 2 };
3122 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
3123 7, 16, 9, 20, 11, 24,
3124 13, 28, 15, 32, 17, 36});
3125
3126 unsigned int shape1[] = { 1, 1, 1, 2 };
3127 std::vector<uint8_t> input1({ 1, 2 });
3128
3129 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
3130 7, 8, 9, 10, 11, 12,
3131 13, 14, 15, 16, 17, 18});
3132
Sadik Armagan2999a022019-04-09 14:20:12 +01003133 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3134 memoryManager,
3135 shape0, input0, 1.0f, 0,
3136 shape1, input1, 1.0f, 0,
3137 shape0, output, 1.0f, 0);
3138}
3139
3140LayerTestResult<int16_t,4> DivisionInt16Test(
3141 armnn::IWorkloadFactory& workloadFactory,
3142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3143{
3144 unsigned int shape[] = { 2, 2, 2, 2 };
3145
3146 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3147 4, 4, 4, 4, 5, 5, 5, 5 });
3148
3149 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3150 4, 4, 4, 4, 4, 4, 4, 4 });
3151
3152 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3153 4, 4, 4, 4, 5, 5, 5, 5});
3154
3155
3156 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3157 memoryManager,
3158 shape, input0, 1.0f, 0,
3159 shape, input1, 1.0f, 0,
3160 shape, output, 0.25f, 0);
3161}
3162
3163LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3164 armnn::IWorkloadFactory& workloadFactory,
3165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3166{
3167 unsigned int shape0[] = { 1, 2, 2, 2 };
3168 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3169
3170 unsigned int shape1[] = { 1, 1, 1, 1 };
3171 std::vector<int16_t> input1({ 2 });
3172
3173 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3174
3175 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3176 memoryManager,
3177 shape0, input0, 1.0f, 0,
3178 shape1, input1, 1.0f, 0,
3179 shape0, output, 1.0f, 0);
3180}
3181
3182LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3183 armnn::IWorkloadFactory& workloadFactory,
3184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3185{
3186 unsigned int shape0[] = { 1, 3, 3, 2 };
3187 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
3188 7, 16, 9, 20, 11, 24,
3189 13, 28, 15, 32, 17, 36});
3190
3191 unsigned int shape1[] = { 1, 1, 1, 2 };
3192 std::vector<int16_t> input1({ 1, 2 });
3193
3194 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
3195 7, 8, 9, 10, 11, 12,
3196 13, 14, 15, 16, 17, 18});
3197
3198 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3199 memoryManager,
3200 shape0, input0, 1.0f, 0,
3201 shape1, input1, 1.0f, 0,
3202 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003203}
3204
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003205template<typename DescriptorType>
3206std::unique_ptr<armnn::IWorkload> CreateWorkload(
3207 const armnn::IWorkloadFactory& workloadFactory,
3208 const armnn::WorkloadInfo& info,
3209 const DescriptorType& descriptor)
3210{
3211 return CreateWorkload(workloadFactory, info, descriptor);
3212};
3213
3214template<>
3215std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
3216 const armnn::IWorkloadFactory& workloadFactory,
3217 const armnn::WorkloadInfo& info,
3218 const armnn::MaximumQueueDescriptor& descriptor)
3219{
3220 return workloadFactory.CreateMaximum(descriptor, info);
3221}
3222
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003223template<>
3224std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
3225 const armnn::IWorkloadFactory& workloadFactory,
3226 const armnn::WorkloadInfo& info,
3227 const armnn::MinimumQueueDescriptor& descriptor)
3228{
3229 return workloadFactory.CreateMinimum(descriptor, info);
3230}
3231
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003232template<>
3233std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
3234 const armnn::IWorkloadFactory& workloadFactory,
3235 const armnn::WorkloadInfo& info,
3236 const armnn::EqualQueueDescriptor& descriptor)
3237{
3238 return workloadFactory.CreateEqual(descriptor, info);
3239}
3240
FrancisMurtagh878f0232018-12-19 10:56:15 +00003241template<>
3242std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
3243 const armnn::IWorkloadFactory& workloadFactory,
3244 const armnn::WorkloadInfo& info,
3245 const armnn::GreaterQueueDescriptor& descriptor)
3246{
3247 return workloadFactory.CreateGreater(descriptor, info);
3248}
3249
namespace {

// Generic driver for binary element-wise workloads (Maximum, Minimum, Equal,
// Greater). Builds the two input tensors, creates the workload through
// CreateWorkload<Descriptor>, executes it, and returns the actual output
// alongside the caller-supplied expected values.
//
// Input and output element types may differ (e.g. Float32 inputs with a
// Boolean output for the comparison workloads). qScale/qOffset are applied
// to all three TensorInfos, but only when TInput is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const uint32_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization parameters are only meaningful for quantized element types;
    // float tests leave the defaults untouched.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean results are compared as 0/1 flags rather than numerically.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensor infos/handles into the queue descriptor, then let the
    // dispatching CreateWorkload overload pick the right factory method.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Backing memory must exist before the input data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure after allocation, then run via ExecuteWorkload so the
    // backend memory manager is handled around execution.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose input and output element types
// are the same: forwards to the two-type helper with ArmnnT used for both.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
3341
3342LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003344{
3345 const unsigned int width = 2;
3346 const unsigned int height = 2;
3347 const unsigned int channelCount = 2;
3348 const unsigned int batchSize = 2;
3349
3350 unsigned int shape[] = { batchSize, channelCount, height, width };
3351
3352 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3353 3, 3, 3, 3, 4, 4, 4, 4 });
3354
3355 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3356 5, 5, 5, 5, 4, 4, 4, 4 });
3357
kevmay012b4d88e2019-01-24 14:05:09 +00003358 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3359 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003360
kevmay012b4d88e2019-01-24 14:05:09 +00003361 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003362 workloadFactory,
3363 memoryManager,
3364 shape,
3365 input0,
3366 shape,
3367 input1,
3368 shape,
3369 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003370}
3371
kevmay012b4d88e2019-01-24 14:05:09 +00003372LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003373 armnn::IWorkloadFactory& workloadFactory,
3374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3375{
3376 unsigned int shape0[] = { 1, 2, 2, 2 };
3377 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3378
3379 unsigned int shape1[] = { 1, 1, 1, 1 };
3380 std::vector<float> input1({ 1 });
3381
kevmay012b4d88e2019-01-24 14:05:09 +00003382 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003383
kevmay012b4d88e2019-01-24 14:05:09 +00003384 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003385 workloadFactory,
3386 memoryManager,
3387 shape0,
3388 input0,
3389 shape1,
3390 input1,
3391 shape0,
3392 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003393}
3394
kevmay012b4d88e2019-01-24 14:05:09 +00003395LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003396 armnn::IWorkloadFactory& workloadFactory,
3397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3398{
3399 const unsigned int shape0[] = { 1, 2, 2, 3 };
3400 const unsigned int shape1[] = { 1, 1, 1, 3 };
3401
3402 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3403 7, 8, 9, 10, 11, 12 });
3404
3405 std::vector<float> input1({ 1, 2, 3});
3406
kevmay012b4d88e2019-01-24 14:05:09 +00003407 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3408 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003409
kevmay012b4d88e2019-01-24 14:05:09 +00003410 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003411 workloadFactory,
3412 memoryManager,
3413 shape0,
3414 input0,
3415 shape1,
3416 input1,
3417 shape0,
3418 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003419}
3420
3421LayerTestResult<uint8_t, 4> EqualUint8Test(
3422 armnn::IWorkloadFactory& workloadFactory,
3423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3424{
3425 unsigned int shape[] = { 2, 2, 2, 2 };
3426
3427 // See dequantized values to the right.
3428 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003429 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003430
3431 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3432 3, 3, 3, 3, 5, 5, 5, 5 });
3433
3434 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3435 1, 1, 1, 1, 0, 0, 0, 0 });
3436
kevmay012b4d88e2019-01-24 14:05:09 +00003437 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3438 armnn::DataType::QuantisedAsymm8,
3439 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003440 workloadFactory,
3441 memoryManager,
3442 shape,
3443 input0,
3444 shape,
3445 input1,
3446 shape,
3447 output,
3448 1.0f,
3449 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003450}
3451
3452LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3453 armnn::IWorkloadFactory& workloadFactory,
3454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3455{
3456 const unsigned int shape0[] = { 1, 2, 2, 3 };
3457 const unsigned int shape1[] = { 1, 1, 1, 1 };
3458
3459 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3460 7, 8, 9, 10, 11, 12 });
3461
3462 std::vector<uint8_t> input1({ 1 });
3463
3464 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3465 0, 0, 0, 0, 0, 0 });
3466
kevmay012b4d88e2019-01-24 14:05:09 +00003467 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3468 armnn::DataType::QuantisedAsymm8,
3469 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003470 workloadFactory,
3471 memoryManager,
3472 shape0,
3473 input0,
3474 shape1,
3475 input1,
3476 shape0,
3477 output,
3478 1.0f,
3479 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003480}
3481
3482LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3483 armnn::IWorkloadFactory& workloadFactory,
3484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3485{
3486 const unsigned int shape0[] = { 1, 2, 2, 3 };
3487 const unsigned int shape1[] = { 1, 1, 1, 3 };
3488
3489 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3490 7, 8, 9, 10, 11, 12 });
3491
3492 std::vector<uint8_t> input1({ 1, 1, 3});
3493
3494 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3495 0, 0, 0, 0, 0, 0 });
3496
kevmay012b4d88e2019-01-24 14:05:09 +00003497 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3498 armnn::DataType::QuantisedAsymm8,
3499 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003500 workloadFactory,
3501 memoryManager,
3502 shape0,
3503 input0,
3504 shape1,
3505 input1,
3506 shape0,
3507 output,
3508 1.0f,
3509 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003510}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003511
kevmay012b4d88e2019-01-24 14:05:09 +00003512LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3514{
3515 const unsigned int width = 2;
3516 const unsigned int height = 2;
3517 const unsigned int channelCount = 2;
3518 const unsigned int batchSize = 2;
3519
3520 unsigned int shape[] = { batchSize, channelCount, height, width };
3521
3522 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3523 3, 3, 3, 3, 4, 4, 4, 4 });
3524
3525 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3526 5, 5, 5, 5, 4, 4, 4, 4 });
3527
kevmay012b4d88e2019-01-24 14:05:09 +00003528 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3529 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003530
kevmay012b4d88e2019-01-24 14:05:09 +00003531 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003532 workloadFactory,
3533 memoryManager,
3534 shape,
3535 input0,
3536 shape,
3537 input1,
3538 shape,
3539 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003540}
3541
kevmay012b4d88e2019-01-24 14:05:09 +00003542LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003543 armnn::IWorkloadFactory& workloadFactory,
3544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3545{
3546 unsigned int shape0[] = { 1, 2, 2, 2 };
3547 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3548
3549 unsigned int shape1[] = { 1, 1, 1, 1 };
3550 std::vector<float> input1({ 1 });
3551
kevmay012b4d88e2019-01-24 14:05:09 +00003552 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003553
kevmay012b4d88e2019-01-24 14:05:09 +00003554 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003555 workloadFactory,
3556 memoryManager,
3557 shape0,
3558 input0,
3559 shape1,
3560 input1,
3561 shape0,
3562 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003563}
3564
kevmay012b4d88e2019-01-24 14:05:09 +00003565LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003566 armnn::IWorkloadFactory& workloadFactory,
3567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3568{
3569 const unsigned int shape0[] = { 1, 2, 2, 3 };
3570 const unsigned int shape1[] = { 1, 1, 1, 3 };
3571
3572 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3573 7, 8, 9, 10, 11, 12 });
3574
3575 std::vector<float> input1({ 1, 3, 2});
3576
kevmay012b4d88e2019-01-24 14:05:09 +00003577 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3578 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003579
kevmay012b4d88e2019-01-24 14:05:09 +00003580 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003581 workloadFactory,
3582 memoryManager,
3583 shape0,
3584 input0,
3585 shape1,
3586 input1,
3587 shape0,
3588 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003589}
3590
3591LayerTestResult<uint8_t, 4> GreaterUint8Test(
3592 armnn::IWorkloadFactory& workloadFactory,
3593 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3594{
3595 unsigned int shape[] = { 2, 2, 2, 2 };
3596
3597 // See dequantized values to the right.
3598 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3599 3, 3, 3, 3, 5, 5, 5, 5 });
3600
3601 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3602 2, 2, 2, 2, 5, 5, 5, 5 });
3603
3604 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3605 1, 1, 1, 1, 0, 0, 0, 0 });
3606
kevmay012b4d88e2019-01-24 14:05:09 +00003607 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3608 armnn::DataType::QuantisedAsymm8,
3609 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003610 workloadFactory,
3611 memoryManager,
3612 shape,
3613 input0,
3614 shape,
3615 input1,
3616 shape,
3617 output,
3618 1.0f,
3619 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003620}
3621
3622LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3623 armnn::IWorkloadFactory& workloadFactory,
3624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3625{
3626 const unsigned int shape0[] = { 1, 2, 2, 3 };
3627 const unsigned int shape1[] = { 1, 1, 1, 1 };
3628
3629 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3630 7, 8, 9, 10, 11, 12 });
3631
3632 std::vector<uint8_t> input1({ 1 });
3633
3634 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3635 1, 1, 1, 1, 1, 1 });
3636
kevmay012b4d88e2019-01-24 14:05:09 +00003637 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3638 armnn::DataType::QuantisedAsymm8,
3639 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003640 workloadFactory,
3641 memoryManager,
3642 shape0,
3643 input0,
3644 shape1,
3645 input1,
3646 shape0,
3647 output,
3648 1.0f,
3649 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003650}
3651
3652LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3653 armnn::IWorkloadFactory& workloadFactory,
3654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3655{
3656 const unsigned int shape0[] = { 1, 2, 2, 3 };
3657 const unsigned int shape1[] = { 1, 1, 1, 3 };
3658
3659 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3660 7, 8, 9, 10, 11, 12 });
3661
3662 std::vector<uint8_t> input1({ 1, 1, 3});
3663
3664 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3665 1, 1, 1, 1, 1, 1 });
3666
kevmay012b4d88e2019-01-24 14:05:09 +00003667 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3668 armnn::DataType::QuantisedAsymm8,
3669 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003670 workloadFactory,
3671 memoryManager,
3672 shape0,
3673 input0,
3674 shape1,
3675 input1,
3676 shape0,
3677 output,
3678 1.0f,
3679 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003680}
3681
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003682LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3683 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3684{
3685 const unsigned int width = 2;
3686 const unsigned int height = 2;
3687 const unsigned int channelCount = 2;
3688 const unsigned int batchSize = 2;
3689
3690 unsigned int shape[] = { batchSize, channelCount, height, width };
3691
3692 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3693 3, 3, 3, 3, 4, 4, 4, 4 });
3694
3695 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3696 4, 4, 4, 4, 5, 5, 5, 5 });
3697
3698 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3699 4, 4, 4, 4, 5, 5, 5, 5 });
3700
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003701 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3702 workloadFactory,
3703 memoryManager,
3704 shape,
3705 input0,
3706 shape,
3707 input1,
3708 shape,
3709 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003710}
3711
3712LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3713 armnn::IWorkloadFactory& workloadFactory,
3714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3715{
3716 unsigned int shape0[] = { 1, 2, 2, 2 };
3717 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3718
3719 unsigned int shape1[] = { 1, 1, 1, 1 };
3720 std::vector<float> input1({ 2 });
3721
3722 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3723
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003724 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3725 workloadFactory,
3726 memoryManager,
3727 shape0,
3728 input0,
3729 shape1,
3730 input1,
3731 shape0,
3732 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003733}
3734
3735LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3736 armnn::IWorkloadFactory& workloadFactory,
3737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3738{
3739 const unsigned int shape0[] = { 1, 2, 2, 3 };
3740 const unsigned int shape1[] = { 1, 1, 1, 3 };
3741
3742 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3743 7, 8, 9, 10, 11, 12 });
3744
3745 std::vector<float> input1({ 1, 2, 3});
3746
3747 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003748 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003749
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003750 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3751 workloadFactory,
3752 memoryManager,
3753 shape0,
3754 input0,
3755 shape1,
3756 input1,
3757 shape0,
3758 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003759}
3760
3761LayerTestResult<uint8_t, 4> MaximumUint8Test(
3762 armnn::IWorkloadFactory& workloadFactory,
3763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3764{
3765 unsigned int shape[] = { 2, 2, 2, 2 };
3766
3767 // See dequantized values to the right.
3768 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3769 3, 3, 3, 3, 4, 4, 4, 4 });
3770
3771 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3772 4, 4, 4, 4, 5, 5, 5, 5 });
3773
3774 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3775 4, 4, 4, 4, 5, 5, 5, 5 });
3776
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003777 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3778 workloadFactory,
3779 memoryManager,
3780 shape,
3781 input0,
3782 shape,
3783 input1,
3784 shape,
3785 output,
3786 1.0f,
3787 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003788}
3789
3790LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3791 armnn::IWorkloadFactory& workloadFactory,
3792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3793{
3794 const unsigned int shape0[] = { 1, 2, 2, 3 };
3795 const unsigned int shape1[] = { 1, 1, 1, 1 };
3796
3797 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3798 7, 8, 9, 10, 11, 12 });
3799
3800 std::vector<uint8_t> input1({2});
3801
3802 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3803 7, 8, 9, 10, 11, 12 });
3804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003805 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3806 workloadFactory,
3807 memoryManager,
3808 shape0,
3809 input0,
3810 shape1,
3811 input1,
3812 shape0,
3813 output,
3814 1.0f,
3815 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003816}
3817
3818LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3819 armnn::IWorkloadFactory& workloadFactory,
3820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3821{
3822 const unsigned int shape0[] = { 1, 2, 2, 3 };
3823 const unsigned int shape1[] = { 1, 1, 1, 3 };
3824
3825 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3826 7, 8, 9, 10, 11, 12 });
3827
3828 std::vector<uint8_t> input1({ 1, 10, 3});
3829
3830 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3831 7, 10, 9, 10, 11, 12 });
3832
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003833 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3834 workloadFactory,
3835 memoryManager,
3836 shape0,
3837 input0,
3838 shape1,
3839 input1,
3840 shape0,
3841 output,
3842 1.0f,
3843 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003844}
3845
Sadik Armagan2999a022019-04-09 14:20:12 +01003846LayerTestResult<int16_t, 4> MaximumInt16Test(
3847 armnn::IWorkloadFactory& workloadFactory,
3848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3849{
3850 unsigned int shape[] = { 2, 2, 2, 2 };
3851
3852 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3853 3, 3, 3, 3, 4, 4, 4, 4 });
3854
3855 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3856 4, 4, 4, 4, 5, 5, 5, 5 });
3857
3858 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3859 4, 4, 4, 4, 5, 5, 5, 5 });
3860
3861 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3862 workloadFactory,
3863 memoryManager,
3864 shape,
3865 input0,
3866 shape,
3867 input1,
3868 shape,
3869 output,
3870 1.0f,
3871 0);
3872}
3873
3874LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3875 armnn::IWorkloadFactory& workloadFactory,
3876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3877{
3878 const unsigned int shape0[] = { 1, 2, 2, 3 };
3879 const unsigned int shape1[] = { 1, 1, 1, 1 };
3880
3881 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3882 7, 8, 9, 10, 11, 12 });
3883
3884 std::vector<int16_t> input1({2});
3885
3886 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3887 7, 8, 9, 10, 11, 12 });
3888
3889 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3890 workloadFactory,
3891 memoryManager,
3892 shape0,
3893 input0,
3894 shape1,
3895 input1,
3896 shape0,
3897 output,
3898 1.0f,
3899 0);
3900}
3901
3902LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3903 armnn::IWorkloadFactory& workloadFactory,
3904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3905{
3906 const unsigned int shape0[] = { 1, 2, 2, 3 };
3907 const unsigned int shape1[] = { 1, 1, 1, 3 };
3908
3909 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3910 7, 8, 9, 10, 11, 12 });
3911
3912 std::vector<int16_t> input1({ 1, 10, 3});
3913
3914 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3915 7, 10, 9, 10, 11, 12 });
3916
3917 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3918 workloadFactory,
3919 memoryManager,
3920 shape0,
3921 input0,
3922 shape1,
3923 input1,
3924 shape0,
3925 output,
3926 1.0f,
3927 0);
3928}
3929
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003930LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3931 armnn::IWorkloadFactory& workloadFactory,
3932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3933{
3934 unsigned int shape0[] = { 1, 2, 2, 2 };
3935 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3936
3937 unsigned int shape1[] = { 1, 1, 1, 1 };
3938 std::vector<float> input1({ 2 });
3939
3940 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3941
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003942 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3943 workloadFactory,
3944 memoryManager,
3945 shape0,
3946 input0,
3947 shape1,
3948 input1,
3949 shape0,
3950 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003951}
3952
3953
3954LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3955 armnn::IWorkloadFactory& workloadFactory,
3956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3957{
3958 unsigned int shape0[] = { 1, 2, 2, 2 };
3959 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3960
3961 unsigned int shape1[] = { 1, 1, 1, 1 };
3962 std::vector<float> input1({ 5 });
3963
3964 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3965
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003966 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3967 workloadFactory,
3968 memoryManager,
3969 shape0,
3970 input0,
3971 shape1,
3972 input1,
3973 shape0,
3974 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003975}
3976
3977LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3978 armnn::IWorkloadFactory & workloadFactory,
3979 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3980{
3981 const unsigned int shape0[] = { 1, 2, 2, 3 };
3982 const unsigned int shape1[] = { 1, 1, 1, 3 };
3983
3984 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3985 7, 1, 2, 3, 4, 5 });
3986
3987 std::vector<uint8_t> input1({ 1, 2, 3});
3988
3989 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3990 1, 1, 2, 1, 2, 3 });
3991
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003992 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3993 workloadFactory,
3994 memoryManager,
3995 shape0,
3996 input0,
3997 shape1,
3998 input1,
3999 shape0,
4000 output,
4001 1.0f,
4002 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004003}
4004
Sadik Armagan2999a022019-04-09 14:20:12 +01004005LayerTestResult<int16_t, 4> MinimumInt16Test(
4006 armnn::IWorkloadFactory& workloadFactory,
4007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4008{
4009 unsigned int shape[] = { 2, 2, 2, 2 };
4010
4011 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4012 3, 3, 3, 3, 4, 4, 4, 4 });
4013
4014 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4015 4, 4, 4, 4, 5, 5, 5, 5 });
4016
4017 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4018 3, 3, 3, 3, 4, 4, 4, 4 });
4019
4020 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4021 workloadFactory,
4022 memoryManager,
4023 shape,
4024 input0,
4025 shape,
4026 input1,
4027 shape,
4028 output,
4029 1.0f,
4030 0);
4031}
4032
4033LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4034 armnn::IWorkloadFactory& workloadFactory,
4035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4036{
4037 const unsigned int shape0[] = { 1, 2, 2, 3 };
4038 const unsigned int shape1[] = { 1, 1, 1, 1 };
4039
4040 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4041 7, 8, 9, 10, 11, 12 });
4042
4043 std::vector<int16_t> input1({2});
4044
4045 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4046 2, 2, 2, 2, 2, 2 });
4047
4048 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4049 workloadFactory,
4050 memoryManager,
4051 shape0,
4052 input0,
4053 shape1,
4054 input1,
4055 shape0,
4056 output,
4057 1.0f,
4058 0);
4059}
4060
4061LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4062 armnn::IWorkloadFactory& workloadFactory,
4063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4064{
4065 const unsigned int shape0[] = { 1, 2, 2, 3 };
4066 const unsigned int shape1[] = { 1, 1, 1, 3 };
4067
4068 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4069 7, 8, 9, 10, 11, 12 });
4070
4071 std::vector<int16_t> input1({ 1, 10, 3});
4072
4073 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4074 1, 8, 3, 1, 10, 3 });
4075
4076 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4077 workloadFactory,
4078 memoryManager,
4079 shape0,
4080 input0,
4081 shape1,
4082 input1,
4083 shape0,
4084 output,
4085 1.0f,
4086 0);
4087}
4088
Francis Murtaghe7a86a42018-08-29 12:42:10 +01004089namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004090LayerTestResult<float,4> MultiplicationTestHelper(
4091 armnn::IWorkloadFactory& workloadFactory,
4092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4093 const unsigned int shape0[4],
4094 const std::vector<float> & values0,
4095 const unsigned int shape1[4],
4096 const std::vector<float> & values1,
4097 const unsigned int outShape[4],
4098 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00004099{
Rob Hughes9e10c2b2019-07-23 15:37:19 +01004100 const uint32_t dimensionCount = 4;
surmeh01bceff2f2018-03-29 16:29:27 +01004101 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
4102 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
4103 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00004104
surmeh01bceff2f2018-03-29 16:29:27 +01004105 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
4106 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00004107
4108 LayerTestResult<float,4> ret(outputTensorInfo);
4109
4110 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4111 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4112 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4113
4114 armnn::MultiplicationQueueDescriptor data;
4115 armnn::WorkloadInfo info;
4116 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4117 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4118 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4119
4120 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4121
4122 inputHandle0->Allocate();
4123 inputHandle1->Allocate();
4124 outputHandle->Allocate();
4125
4126 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4127 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4128
Derek Lambertif30f7d32019-04-09 10:25:02 +01004129 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004130 workload->Execute();
4131
4132 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4133
surmeh01bceff2f2018-03-29 16:29:27 +01004134 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00004135 return ret;
4136}
surmeh01bceff2f2018-03-29 16:29:27 +01004137} // anonymous namespace
4138
4139
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004140LayerTestResult<float,4> MultiplicationTest(
4141 armnn::IWorkloadFactory& workloadFactory,
4142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004143{
4144 const unsigned int width = 2;
4145 const unsigned int height = 2;
4146 const unsigned int channelCount = 2;
4147 const unsigned int batchSize = 2;
4148
4149 unsigned int shape[] = { batchSize, channelCount, height, width };
4150
4151 std::vector<float> input0({
4152 1, 1, 1, 1, 2, 2, 2, 2,
4153 3, 3, 3, 3, 4, 4, 4, 4 });
4154
4155 std::vector<float> input1({
4156 2, 2, 2, 2, 3, 3, 3, 3,
4157 4, 4, 4, 4, 5, 5, 5, 5 });
4158
4159 std::vector<float> output({
4160 2, 2, 2, 2, 6, 6, 6, 6,
4161 12, 12, 12, 12, 20, 20, 20, 20 });
4162
4163 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004164 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004165 shape,
4166 input0,
4167 shape,
4168 input1,
4169 shape,
4170 output);
4171}
4172
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004173LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4174 armnn::IWorkloadFactory& workloadFactory,
4175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004176{
4177 unsigned int shape0[] = { 1, 2, 2, 2 };
4178 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4179
4180 unsigned int shape1[] = { 1, 1, 1, 1 };
4181 std::vector<float> input1({ 2 });
4182
4183 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4184
4185 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004186 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004187 shape0,
4188 input0,
4189 shape1,
4190 input1,
4191 shape0,
4192 output);
4193}
4194
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004195LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4196 armnn::IWorkloadFactory& workloadFactory,
4197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004198{
4199 unsigned int shape0[] = { 1, 3, 3, 2 };
4200 std::vector<float> input0({
4201 1, 2, 3, 4, 5, 6,
4202 7, 8, 9, 10, 11, 12,
4203 13, 14, 15, 16, 17, 18});
4204
4205 unsigned int shape1[] = { 1, 1, 1, 2 };
4206 std::vector<float> input1({ 1, 2 });
4207
4208 std::vector<float> output({
4209 1, 4, 3, 8, 5, 12,
4210 7, 16, 9, 20, 11, 24,
4211 13, 28, 15, 32, 17, 36});
4212
4213 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004214 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01004215 shape0,
4216 input0,
4217 shape1,
4218 input1,
4219 shape0,
4220 output);
4221}
telsoa014fcda012018-03-09 14:13:49 +00004222
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004223LayerTestResult<float,4> CompareMultiplicationTest(
4224 armnn::IWorkloadFactory& workloadFactory,
4225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4226 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00004227{
4228 const unsigned int width = 16;
4229 const unsigned int height = 32;
4230 const unsigned int channelCount = 2;
4231 const unsigned int batchSize = 5;
4232
4233 armnn::TensorInfo inputTensorInfo0;
4234 armnn::TensorInfo inputTensorInfo1;
4235 armnn::TensorInfo outputTensorInfo;
4236
4237 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
4238
4239 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4240 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4241 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4242
4243 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
4244
4245 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
4246 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
4247
4248 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
4249 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
4250 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4251
4252 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
4253 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
4254 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4255
4256 armnn::MultiplicationQueueDescriptor data;
4257 armnn::WorkloadInfo info;
4258 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
4259 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
4260 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4261
4262 armnn::MultiplicationQueueDescriptor refData = data;
4263 armnn::WorkloadInfo refInfo = info;
4264 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
4265 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
4266 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4267
4268 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
4269 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
4270
4271 inputHandle0->Allocate();
4272 inputHandle1->Allocate();
4273 outputHandle->Allocate();
4274 inputHandle0Ref->Allocate();
4275 inputHandle1Ref->Allocate();
4276 outputHandleRef->Allocate();
4277
4278 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
4279 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
4280 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
4281 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
4282
Derek Lambertif30f7d32019-04-09 10:25:02 +01004283 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004284 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01004285 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004286 workloadRef->Execute();
telsoa014fcda012018-03-09 14:13:49 +00004287 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
4288 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
4289
4290 return comparisonResult;
4291}
4292
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004293LayerTestResult<float,4> CompareBatchNormTest(
4294 armnn::IWorkloadFactory& workloadFactory,
4295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4296 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00004297{
4298 const unsigned int width = 2;
4299 const unsigned int height = 3;
4300 const unsigned int channels = 5;
4301 const unsigned int batchSize = 3;
4302
4303 armnn::TensorInfo inputTensorInfo;
4304 armnn::TensorInfo outputTensorInfo;
4305 armnn::TensorInfo tensorInfo;
4306
4307 constexpr unsigned int shape[] = {batchSize, channels, height, width};
4308 constexpr unsigned int tensorShape[] = {channels};
4309
4310 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4311 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
4312 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
4313
4314 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
4315
4316 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
4317 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
4318 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
4319 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
4320
4321 LayerTestResult<float,4> ret(outputTensorInfo);
4322
4323 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4324 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4325
4326 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
4327 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
4328
4329 armnn::BatchNormalizationQueueDescriptor data;
4330 armnn::WorkloadInfo info;
4331 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
4332 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
4333 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
4334 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
4335
4336 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
4337 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
4338 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
4339 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
4340
4341 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
4342 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
4343 data.m_Mean = &meanTensor;
4344 data.m_Variance = &varianceTensor;
4345 data.m_Beta = &betaTensor;
4346 data.m_Gamma = &gammaTensor;
4347 data.m_Parameters.m_Eps = 0.01f;
4348
4349 armnn::BatchNormalizationQueueDescriptor refData = data;
4350 armnn::WorkloadInfo refInfo = info;
4351 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
4352 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
4353
4354 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
4355 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
4356
4357 inputHandle->Allocate();
4358 outputHandle->Allocate();
4359 inputHandleRef->Allocate();
4360 outputHandleRef->Allocate();
4361
4362 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4363 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
4364
Derek Lambertif30f7d32019-04-09 10:25:02 +01004365 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004366 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01004367 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004368 workloadRef->Execute();
4369
4370 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
4371 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
4372
4373 return ret;
4374}
4375
surmeh013537c2c2018-05-18 16:31:43 +01004376template<typename T>
4377void PermuteTensorData(
4378 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004379 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004380 const armnn::PermutationVector& mappings,
4381 armnn::TensorInfo & inputTensorInfo,
4382 const T * inputData,
4383 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00004384{
surmeh013537c2c2018-05-18 16:31:43 +01004385 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
4386 if (inputData == nullptr)
4387 {
4388 // Nullptr is an error in the test. By returning without doing the concatenation
4389 // I expect the caller to fail the test. It still makes sense to report this as
4390 // an assert for Debug builds.
4391 return;
4392 }
telsoa014fcda012018-03-09 14:13:49 +00004393
surmeh013537c2c2018-05-18 16:31:43 +01004394 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
4395
4396 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4397 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4398
4399 armnn::PermuteQueueDescriptor queueDescriptor;
4400 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
4401 armnn::WorkloadInfo workloadInfo;
4402 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
4403 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4404
4405 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
4406
4407 inputHandle->Allocate();
4408 outputHandle->Allocate();
4409
4410 CopyDataToITensorHandle(inputHandle.get(), inputData);
4411
Derek Lambertif30f7d32019-04-09 10:25:02 +01004412 workload->PostAllocationConfigure();
surmeh013537c2c2018-05-18 16:31:43 +01004413 workload->Execute();
4414
4415 outputData.resize(outputTensorInfo.GetNumElements());
4416 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
4417 inputTensorInfo = outputTensorInfo;
4418}
4419
Jim Flynn825af452019-05-20 12:49:28 +01004420armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004421 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4422 unsigned int concatDim)
4423{
telsoa014fcda012018-03-09 14:13:49 +00004424 std::vector<armnn::TensorShape> shapes;
4425 shapes.reserve(inputTensorInfos.size());
4426 for (const armnn::TensorInfo& it: inputTensorInfos)
4427 {
4428 shapes.push_back(it.GetShape());
4429 }
surmeh013537c2c2018-05-18 16:31:43 +01004430
Jim Flynn825af452019-05-20 12:49:28 +01004431 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4432 shapes.end(),
4433 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004434}
4435
4436//
narpra015cdda352018-11-19 15:30:27 +00004437// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension
4438// In case of <4 dimensions we need to make sure that the concat dimensions are at least
4439// the 3rd slowest iterating one or the inner most dimension.
surmeh013537c2c2018-05-18 16:31:43 +01004440//
4441
// Returns true when the inputs must be permuted before concatenation,
// i.e. when the requested concat axis is not one the backend supports
// directly (see the comment block above). For rank-3 inputs this is
// exactly the middle (H) axis; anything below rank 3 always needs the
// expand-and-permute treatment.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // rank < 3: always permute. rank == 3: permute unless concatDim is the
    // outermost axis (nDimensions-concatDim == 3) or the innermost one
    // (nDimensions-concatDim == 1).
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
4467
4468armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4469{
4470 unsigned int numDims = inputShape.GetNumDimensions();
4471 if (numDims >= 3)
4472 {
4473 // Nothing to do if the inputShape has at least 3 dimensions.
4474 return inputShape;
4475 }
4476
4477 std::vector<unsigned int> newDims(size_t(3), 1u);
4478 unsigned int expandedBy = 3 - numDims;
4479 for (unsigned int i=0; i<numDims; ++i)
4480 {
4481 newDims[expandedBy+i] = inputShape[i];
4482 }
4483 return armnn::TensorShape(3u, &newDims[0]);
4484}
4485
4486void Generate3dPermuteVectorForConcat(
4487 unsigned int numDimensions,
4488 unsigned int & concatDim,
4489 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4490{
4491 BOOST_ASSERT_MSG(numDimensions <= 3,
4492 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004493 unsigned int expandedBy = 3 - numDimensions;
4494 unsigned int expandedConcatAxis = concatDim + expandedBy;
4495
4496 if (expandedConcatAxis == 2)
4497 {
4498 concatDim = 0;
4499 armnn::PermutationVector forwardPermutation({1, 2, 0});
4500 armnn::PermutationVector reversePermutation({2, 0, 1});
4501 permutations = std::make_pair(forwardPermutation, reversePermutation);
4502 }
4503 else if (expandedConcatAxis == 1)
4504 {
4505 concatDim = 0;
4506 armnn::PermutationVector forwardPermutation({2, 0, 1});
4507 armnn::PermutationVector reversePermutation({1, 2, 0});
4508 permutations = std::make_pair(forwardPermutation, reversePermutation);
4509 }
4510 else
4511 {
4512 BOOST_ASSERT(expandedConcatAxis == 0);
4513 concatDim = 0;
4514 }
4515}
4516
4517//
4518// Permute the input tensors so we can do a supported concatenation.
4519// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
4520// at the front. Finally this function tells what the output shape
4521// of the permuted concatenated tensor is going to be.
4522//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    // See the comment block above: expands every input to 3D, permutes each
    // one so the requested concatenation axis becomes axis 0, and rewrites
    // inputTensorInfos / inputData / concatDim / outputTensorInfo in place.
    // On return, permuteVector holds the reverse permutation needed to restore
    // the original layout (consumed later by PermuteOutputForConcat).
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    // One storage vector per input; each will own the permuted copy of its data.
    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the permutations from its rank. All other
            // inputs are required to have the same rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Treat lower-rank tensors as 3D by prepending dummy 1 dimensions.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Point the caller's data pointer and tensor info at the permuted copy.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape undergoes the same expansion + forward permutation.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4585
4586
4587//
4588// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01004589// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01004590// output.
4591//
4592template <typename T>
4593void PermuteOutputForConcat(
4594 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004596 const armnn::TensorInfo & tensorInfo,
4597 const armnn::PermutationVector & permuteVector,
4598 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
4599 T * data)
4600{
4601 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
4602 if (data == nullptr)
4603 {
4604 // Nullptr is an error in the test. By returning without doing the permutation
4605 // I expect the caller to fail the test. It still makes sense to report this as
4606 // an assert for Debug builds.
4607 return;
4608 }
4609
4610 armnn::TensorInfo resultTensorInfo = tensorInfo;
4611 std::vector<T> inputData(tensorInfo.GetNumElements());
4612 std::vector<T> outputData;
4613
4614 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
4615
4616 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004617 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004618 permuteVector,
4619 resultTensorInfo,
4620 &inputData[0],
4621 outputData);
4622
4623 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
4624}
4625
4626template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004627void Concatenate(
4628 armnn::IWorkloadFactory& workloadFactory,
4629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4630 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4631 std::initializer_list<T *> inputsOrig,
4632 const armnn::TensorInfo& outputTensorInfoOrig,
4633 T * output,
narpra015cdda352018-11-19 15:30:27 +00004634 unsigned int concatDim,
4635 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004636{
4637 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4638 if (output == nullptr)
4639 {
4640 // Nullptr is an error in the test. By returning without doing the permutation
4641 // I expect the caller to fail the test. It still makes sense to report this as
4642 // an assert for Debug builds.
4643 return;
4644 }
4645
telsoa01c577f2c2018-08-31 09:22:23 +01004646 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004647 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4648 std::vector<T *> inputs = inputsOrig;
4649 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4650
4651 armnn::PermutationVector permuteVector{0, 1, 2};
4652
telsoa01c577f2c2018-08-31 09:22:23 +01004653 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004654 std::vector<std::vector<T>> tmpInputDataStorage;
4655
4656 const size_t inputCount = inputTensorInfos.size();
4657
4658 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4659
4660 if (needPermuteForConcat)
4661 {
4662 //
4663 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004664 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004665 //
4666 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004667 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004668 inputTensorInfos,
4669 inputs,
4670 tmpInputDataStorage,
4671 permuteVector,
4672 concatDim,
4673 outputTensorInfo);
4674 }
4675
narpra015cdda352018-11-19 15:30:27 +00004676 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004677
4678 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4679 inputHandles.reserve(inputCount);
4680
narpra015cdda352018-11-19 15:30:27 +00004681 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4682
Jim Flynne242f2d2019-05-22 14:24:13 +01004683 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004684 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004685 queueDescriptor.m_Parameters = viewsDescriptor;
4686
4687 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004688 {
narpra015cdda352018-11-19 15:30:27 +00004689 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4690 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4691 {
4692 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4693 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4694 }
telsoa014fcda012018-03-09 14:13:49 +00004695
narpra015cdda352018-11-19 15:30:27 +00004696 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004697
narpra015cdda352018-11-19 15:30:27 +00004698 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4699 for (unsigned int i = 0; i < inputCount; ++i)
4700 {
4701 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4702 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4703 subTensorsSupported ?
4704 workloadFactory.CreateSubTensorHandle(*outputHandle,
4705 inputTensorInfo.GetShape(),
4706 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4707 workloadFactory.CreateTensorHandle(inputTensorInfo);
4708
4709 inputHandles.emplace_back(std::move(inputHandle));
4710 }
4711
telsoa014fcda012018-03-09 14:13:49 +00004712 }
narpra015cdda352018-11-19 15:30:27 +00004713 else
4714 {
4715 for (unsigned int i = 0; i < inputCount; ++i)
4716 {
4717 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4718 inputHandles.emplace_back(std::move(inputHandle));
4719 }
4720 }
telsoa014fcda012018-03-09 14:13:49 +00004721
4722 for (unsigned int i = 0; i < inputCount; ++i)
4723 {
surmeh013537c2c2018-05-18 16:31:43 +01004724 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004725 }
4726
4727 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4728
Jim Flynn4ed6c832019-05-20 11:02:46 +01004729 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004730
4731 for (auto& inputHandle : inputHandles)
4732 {
4733 inputHandle->Allocate();
4734 }
4735
4736 outputHandle->Allocate();
4737
4738 unsigned int nextInputId = 0;
4739 for (auto& inputHandle : inputHandles)
4740 {
surmeh013537c2c2018-05-18 16:31:43 +01004741 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4742 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004743 }
4744
Derek Lambertif30f7d32019-04-09 10:25:02 +01004745 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004746 workload->Execute();
4747
surmeh013537c2c2018-05-18 16:31:43 +01004748 if (needPermuteForConcat)
4749 {
4750 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004751 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004752 outputTensorInfo,
4753 permuteVector,
4754 std::move(outputHandle),
4755 output);
4756 }
4757 else
4758 {
4759 CopyDataFromITensorHandle(output, outputHandle.get());
4760 }
telsoa014fcda012018-03-09 14:13:49 +00004761}
4762
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004763template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764LayerTestResult<T, 1> Concatenation1dTestImpl(
4765 armnn::IWorkloadFactory& workloadFactory,
4766 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4767 float qScale,
4768 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004769{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004770 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004771
4772 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4773 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4774 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4775
Jim Flynncbb66aa2019-05-15 13:03:54 +01004776 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004777
4778 LayerTestResult<T, 1> result(outputTensorInfo);
4779
4780 std::vector<T> output;
4781 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004782 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004783 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4784 { input0.data(), input1.data(), input2.data() },
4785 outputTensorInfo,
4786 output.data(),
4787 0,
4788 true);
telsoa014fcda012018-03-09 14:13:49 +00004789
4790 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4791 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4792 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4793 }));
4794
4795 return result;
4796}
4797
LayerTestResult<float, 1> Concatenation1dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004805template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004806LayerTestResult<T, 2> Concatenation2dTestImpl(
4807 armnn::IWorkloadFactory& workloadFactory,
4808 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004809 const armnn::TensorInfo& outputTensorInfo,
4810 unsigned int dimension,
4811 const float qScale,
4812 const int32_t qOffset)
4813{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004814 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004815
4816 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4817 // Batch 0
4818 1.0f, 2.0f, 3.0f,
4819
4820 // Batch 1
4821 10.0f, 11.0f, 12.0f,
4822 }));
4823
4824 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4825 // Batch 0
4826 4.0f, 5.0f, 6.0f,
4827
4828 // Batch 1
4829 13.0f, 14.0f, 15.0f,
4830 }));
4831
4832 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4833 // Batch 0
4834 7.0f, 8.0f, 9.0f,
4835
4836 // Batch 1
4837 16.0f, 17.0f, 18.0f,
4838 }));
4839
4840 LayerTestResult<T, 2> result(outputTensorInfo);
4841
4842 std::vector<T> output;
4843 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004844 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004845 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4846 { input0.data(), input1.data(), input2.data() },
4847 outputTensorInfo,
4848 output.data(),
4849 dimension,
4850 true);
telsoa014fcda012018-03-09 14:13:49 +00004851
4852 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4853 return result;
4854}
4855
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004856template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004857LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4858 armnn::IWorkloadFactory& workloadFactory,
4859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4860 float qScale,
4861 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004862{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004863 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004864
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004865 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4866 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4867
telsoa014fcda012018-03-09 14:13:49 +00004868 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4869 // Batch 0
4870 1.0f, 2.0f, 3.0f,
4871
4872 // Batch 1
4873 10.0f, 11.0f, 12.0f,
4874
4875 // Batch 2
4876 4.0f, 5.0f, 6.0f,
4877
4878 // Batch 3
4879 13.0f, 14.0f, 15.0f,
4880
4881 // Batch 4
4882 7.0f, 8.0f, 9.0f,
4883
4884 // Batch 5
4885 16.0f, 17.0f, 18.0f,
4886 }));
4887
4888 return result;
4889}
4890
LayerTestResult<float, 2> Concatenation2dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4897
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004898template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004899LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4900 armnn::IWorkloadFactory& workloadFactory,
4901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4902 float qScale,
4903 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004904{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004905 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004906
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004907 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4908 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4909
telsoa014fcda012018-03-09 14:13:49 +00004910 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4911 // Batch 0
4912 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4913
4914 // Batch 1
4915 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4916 }));
4917
4918 return result;
4919}
4920
LayerTestResult<float, 2> Concatenation2dDim1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
4927
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004928template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004929LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
4930 armnn::IWorkloadFactory& workloadFactory,
4931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4932 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004933 int32_t qOffset)
4934{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004935 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004936 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4937 // Batch 0
4938 1.0f, 2.0f, 3.0f,
4939
4940 // Batch 1
4941 10.0f, 11.0f, 12.0f,
4942 }));
4943
Jim Flynncbb66aa2019-05-15 13:03:54 +01004944 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004945 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4946 // Batch 0
4947 4.0f, 5.0f, 6.0f,
4948
4949 // Batch 1
4950 13.0f, 14.0f, 15.0f,
4951
4952 // Batch 0
4953 7.0f, 8.0f, 9.0f,
4954 }));
4955
Jim Flynncbb66aa2019-05-15 13:03:54 +01004956 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004957 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4958 // Batch 1
4959 16.0f, 17.0f, 18.0f,
4960 }));
4961
Jim Flynncbb66aa2019-05-15 13:03:54 +01004962 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004963 LayerTestResult<T, 2> result(outputTensorInfo);
4964
4965 std::vector<T> output;
4966 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004967 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004968 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4969 { input0.data(), input1.data(), input2.data() },
4970 outputTensorInfo,
4971 output.data(),
4972 0,
4973 true);
telsoa014fcda012018-03-09 14:13:49 +00004974
4975 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4976 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4977 // Batch 0
4978 1.0f, 2.0f, 3.0f,
4979
4980 // Batch 1
4981 10.0f, 11.0f, 12.0f,
4982
4983 // Batch 2
4984 4.0f, 5.0f, 6.0f,
4985
4986 // Batch 3
4987 13.0f, 14.0f, 15.0f,
4988
4989 // Batch 4
4990 7.0f, 8.0f, 9.0f,
4991
4992 // Batch 5
4993 16.0f, 17.0f, 18.0f,
4994 }));
4995
4996 return result;
4997}
4998
LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
5006
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005007template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005008LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
5009 armnn::IWorkloadFactory& workloadFactory,
5010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5011 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005012 int32_t qOffset)
5013{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005014 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005015 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5016 // Batch 0
5017 1.0f, 2.0f, 3.0f,
5018
5019 // Batch 1
5020 10.0f, 11.0f, 12.0f,
5021 }));
5022
Jim Flynncbb66aa2019-05-15 13:03:54 +01005023 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005024 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5025 // Batch 0
5026 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
5027
5028 // Batch 1
5029 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
5030 }));
5031
Jim Flynncbb66aa2019-05-15 13:03:54 +01005032 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005033 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5034 // Batch 0
5035 9.0f,
5036
5037 // Batch 1
5038 18.0f
5039 }));
5040
Jim Flynncbb66aa2019-05-15 13:03:54 +01005041 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005042 LayerTestResult<T, 2> result(outputTensorInfo);
5043
5044 std::vector<T> output;
5045 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005046 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005047 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5048 { input0.data(), input1.data(), input2.data() },
5049 outputTensorInfo,
5050 output.data(),
5051 1,
5052 true);
telsoa014fcda012018-03-09 14:13:49 +00005053
5054 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5055 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5056 // Batch 0
5057 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5058
5059 // Batch 1
5060 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
5061 }));
5062
5063 return result;
5064}
5065
LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
5073
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005074template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005075LayerTestResult<T, 3> Concatenation3dTestImpl(
5076 armnn::IWorkloadFactory& workloadFactory,
5077 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00005078 const armnn::TensorInfo& outputTensorInfo,
5079 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00005080 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00005081 float qScale,
5082 int32_t qOffset)
5083{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005084 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005085
5086 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5087 // Batch 0, Channel 0
5088 1.0f, 2.0f,
5089
5090 // Batch 0, Channel 1
5091 3.0f, 4.0f,
5092
5093 // Batch 0, Channel 2
5094 5.0f, 6.0f,
5095
5096 // Batch 1, Channel 0
5097 19.0f, 20.0f,
5098
5099 // Batch 1, Channel 1
5100 21.0f, 22.0f,
5101
5102 // Batch 1, Channel 2
5103 23.0f, 24.0f
5104 }));
5105
5106 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5107 // Batch 0, Channel 0
5108 7.0f, 8.0f,
5109
5110 // Batch 0, Channel 1
5111 9.0f, 10.0f,
5112
5113 // Batch 0, Channel 2
5114 11.0f, 12.0f,
5115
5116 // Batch 1, Channel 0
5117 25.0f, 26.0f,
5118
5119 // Batch 1, Channel 1
5120 27.0f, 28.0f,
5121
5122 // Batch 1, Channel 2
5123 29.0f, 30.0f
5124 }));
5125
5126 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5127 // Batch 0, Channel 0
5128 13.0f, 14.0f,
5129
5130 // Batch 0, Channel 1
5131 15.0f, 16.0f,
5132
5133 // Batch 0, Channel 2
5134 17.0f, 18.0f,
5135
5136 // Batch 1, Channel 0
5137 31.0f, 32.0f,
5138
5139 // Batch 1, Channel 1
5140 33.0f, 34.0f,
5141
5142 // Batch 1, Channel 2
5143 35.0f, 36.0f
5144 }));
5145
5146 LayerTestResult<T, 3> result(outputTensorInfo);
5147
5148 std::vector<T> output;
5149 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005150 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005151 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5152 { input0.data(), input1.data(), input2.data() },
5153 outputTensorInfo,
5154 output.data(),
5155 dimension,
5156 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005157
5158 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5159 return result;
5160}
5161
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005162template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005163LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
5164 armnn::IWorkloadFactory& workloadFactory,
5165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5166 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005167 int32_t qOffset)
5168{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005169 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005170
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005171 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5172 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5173
telsoa014fcda012018-03-09 14:13:49 +00005174 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5175 // Batch 0, Channel 0
5176 1.0f, 2.0f,
5177
5178 // Batch 0, Channel 1
5179 3.0f, 4.0f,
5180
5181 // Batch 0, Channel 2
5182 5.0f, 6.0f,
5183
5184 // Batch 1, Channel 0
5185 19.0f, 20.0f,
5186
5187 // Batch 1, Channel 1
5188 21.0f, 22.0f,
5189
5190 // Batch 1, Channel 2
5191 23.0f, 24.0f,
5192
5193 // Batch 2, Channel 0
5194 7.0f, 8.0f,
5195
5196 // Batch 2, Channel 1
5197 9.0f, 10.0f,
5198
5199 // Batch 2, Channel 2
5200 11.0f, 12.0f,
5201
5202 // Batch 3, Channel 0
5203 25.0f, 26.0f,
5204
5205 // Batch 3, Channel 1
5206 27.0f, 28.0f,
5207
5208 // Batch 3, Channel 2
5209 29.0f, 30.0f,
5210
5211 // Batch 4, Channel 0
5212 13.0f, 14.0f,
5213
5214 // Batch 4, Channel 1
5215 15.0f, 16.0f,
5216
5217 // Batch 4, Channel 2
5218 17.0f, 18.0f,
5219
5220 // Batch 5, Channel 0
5221 31.0f, 32.0f,
5222
5223 // Batch 5, Channel 1
5224 33.0f, 34.0f,
5225
5226 // Batch 5, Channel 2
5227 35.0f, 36.0f
5228 }));
narpra015cdda352018-11-19 15:30:27 +00005229
telsoa014fcda012018-03-09 14:13:49 +00005230 return result;
5231}
5232
LayerTestResult<float, 3> Concatenation3dDim0Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 variant: quantization scale/offset are unused (0.0f, 0).
    return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
5239
// Concatenates three 3D inputs along dimension 1 (channels) into a
// { 2, 9, 2 } output and verifies the result element-by-element.
// The inputs are created by Concatenation3dTestImpl (not visible here);
// presumably three { 2, 3, 2 } tensors, since the output has 9 channels
// — confirm against Concatenation3dTestImpl.
// qScale/qOffset: quantization parameters used for both the tensor infos
// and the expected data (ignored for float instantiations).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 1, useSubtensor = true.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // Channels of the three inputs appear back-to-back within each batch.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
5310
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005311LayerTestResult<float, 3> Concatenation3dDim1Test(
5312 armnn::IWorkloadFactory& workloadFactory,
5313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005314{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005315 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005316}
5317
// Concatenates three 3D inputs along dimension 2 (width) into a
// { 2, 3, 6 } output and verifies the result.
// useSubtensor: forwarded to the shared driver; toggles the sub-tensor
// code path in the backend under test.
// qScale/qOffset: quantization parameters (ignored for float instantiations).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    // dimension = 2.
    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    // Each row holds the corresponding rows of the three inputs side by side.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
5353
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005354LayerTestResult<float, 3> Concatenation3dDim2Test(
5355 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5357 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005358{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005359 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5360 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005361}
5362
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005363template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005364LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5365 armnn::IWorkloadFactory& workloadFactory,
5366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5367 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005368 int32_t qOffset)
5369{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005370 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005371 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5372 // Batch 0, Channel 0
5373 1.0f, 2.0f,
5374
5375 // Batch 0, Channel 1
5376 3.0f, 4.0f,
5377
5378 // Batch 0, Channel 2
5379 5.0f, 6.0f,
5380
5381 // Batch 1, Channel 0
5382 19.0f, 20.0f,
5383
5384 // Batch 1, Channel 1
5385 21.0f, 22.0f,
5386
5387 // Batch 1, Channel 2
5388 23.0f, 24.0f
5389 }));
5390
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005391 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005392 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5393 // Batch 0, Channel 0
5394 7.0f, 8.0f,
5395
5396 // Batch 0, Channel 1
5397 9.0f, 10.0f,
5398
5399 // Batch 0, Channel 2
5400 11.0f, 12.0f,
5401 }));
5402
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005403 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005404 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5405 // Batch 0, Channel 0
5406 25.0f, 26.0f,
5407
5408 // Batch 0, Channel 1
5409 27.0f, 28.0f,
5410
5411 // Batch 0, Channel 2
5412 29.0f, 30.0f,
5413
5414 // Batch 1, Channel 0
5415 13.0f, 14.0f,
5416
5417 // Batch 1, Channel 1
5418 15.0f, 16.0f,
5419
5420 // Batch 1, Channel 2
5421 17.0f, 18.0f,
5422
5423 // Batch 2, Channel 0
5424 31.0f, 32.0f,
5425
5426 // Batch 2, Channel 1
5427 33.0f, 34.0f,
5428
5429 // Batch 2, Channel 2
5430 35.0f, 36.0f
5431 }));
5432
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005433 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005434 LayerTestResult<T, 3> result(outputTensorInfo);
5435
5436 std::vector<T> output;
5437 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005438 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005439 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5440 { input0.data(), input1.data(), input2.data() },
5441 outputTensorInfo,
5442 output.data(),
5443 0,
5444 true);
telsoa014fcda012018-03-09 14:13:49 +00005445
5446 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5447 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5448 // Batch 0, Channel 0
5449 1.0f, 2.0f,
5450
5451 // Batch 0, Channel 1
5452 3.0f, 4.0f,
5453
5454 // Batch 0, Channel 2
5455 5.0f, 6.0f,
5456
5457 // Batch 1, Channel 0
5458 19.0f, 20.0f,
5459
5460 // Batch 1, Channel 1
5461 21.0f, 22.0f,
5462
5463 // Batch 1, Channel 2
5464 23.0f, 24.0f,
5465
5466 // Batch 2, Channel 0
5467 7.0f, 8.0f,
5468
5469 // Batch 2, Channel 1
5470 9.0f, 10.0f,
5471
5472 // Batch 2, Channel 2
5473 11.0f, 12.0f,
5474
5475 // Batch 3, Channel 0
5476 25.0f, 26.0f,
5477
5478 // Batch 3, Channel 1
5479 27.0f, 28.0f,
5480
5481 // Batch 3, Channel 2
5482 29.0f, 30.0f,
5483
5484 // Batch 4, Channel 0
5485 13.0f, 14.0f,
5486
5487 // Batch 4, Channel 1
5488 15.0f, 16.0f,
5489
5490 // Batch 4, Channel 2
5491 17.0f, 18.0f,
5492
5493 // Batch 5, Channel 0
5494 31.0f, 32.0f,
5495
5496 // Batch 5, Channel 1
5497 33.0f, 34.0f,
5498
5499 // Batch 5, Channel 2
5500 35.0f, 36.0f
5501 }));
5502
5503 return result;
5504}
5505
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005506LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5507 armnn::IWorkloadFactory& workloadFactory,
5508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005509{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005510 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5511 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005512}
5513
// Concatenates three inputs with differing channel counts — { 2, 3, 2 },
// { 2, 4, 2 } and { 2, 1, 2 } — along dimension 1 (channels) into a
// { 2, 8, 2 } output and verifies the result.
// qScale/qOffset: quantization parameters applied to every tensor info and
// to the (de)quantized test data (ignored for float instantiations).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1, using the sub-tensor path.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Within each batch, channels of the three inputs appear back-to-back:
    // 3 from input0, then 4 from input1, then 1 from input2.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
5644
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005645LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5646 armnn::IWorkloadFactory& workloadFactory,
5647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005649 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5650 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005651}
5652
// Concatenates three inputs with differing widths — { 2, 3, 2 }, { 2, 3, 1 }
// and { 2, 3, 3 } — along dimension 2 (width) into a { 2, 3, 6 } output and
// verifies the result.
// useSubtensor: forwarded to Concatenate; toggles the sub-tensor code path.
// qScale/qOffset: quantization parameters (ignored for float instantiations).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2.
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Each row holds 2 values from input0, 1 from input1, 3 from input2.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5760
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005761LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5762 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5764 bool useSubtensor)
5765{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005766 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5767 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005768}
5769
// Shared driver for the 4D concatenation tests: builds three identical
// { 1, 3, 2, 2 } inputs, concatenates them along 'dimension' into an output
// described by 'outputTensorInfo', and returns the actual output.
// Callers are expected to fill in result.outputExpected themselves.
// useSubtensor: forwarded to Concatenate; toggles the sub-tensor code path.
// qScale/qOffset: quantization parameters (ignored for float instantiations).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5826
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 0 (batch) into a
// { 3, 3, 2, 2 } output and verifies the result: the three input blocks
// appear back-to-back in input order.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 0, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5863
5864LayerTestResult<float, 4> Concatenation4dDim0Test(
5865 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005866 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005867{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005868 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005869}
5870
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 1 (channels) into
// a { 1, 9, 2, 2 } output and verifies the result. With a single batch, the
// flattened output is the three input blocks back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 1, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5908
5909LayerTestResult<float, 4> Concatenation4dDim1Test(
5910 armnn::IWorkloadFactory& workloadFactory,
5911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5912{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005913 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005914}
5915
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 2 (height) into a
// { 1, 3, 6, 2 } output and verifies that, per channel, the rows of the three
// inputs are interleaved block-wise.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    // dimension = 2, useSubtensor = true.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5953
5954LayerTestResult<float, 4> Concatenation4dDim2Test(
5955 armnn::IWorkloadFactory& workloadFactory,
5956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5957{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005958 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005959}
5960
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 3 (width / the
// innermost axis) into a { 1, 3, 2, 6 } output and verifies the element-wise
// interleaving.
// useSubtensor: forwarded to the shared driver; toggles the sub-tensor path.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    // dimension = 3.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5999
6000LayerTestResult<float, 4> Concatenation4dDim3Test(
6001 armnn::IWorkloadFactory& workloadFactory,
6002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6003 bool useSubtensor)
6004{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006005 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6006 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00006007}
6008
// Concatenates two inputs with different batch counts — { 1, 3, 2, 2 } and
// { 2, 3, 2, 2 } — along dimension 0 (batch) into a { 3, 3, 2, 2 } output
// and verifies that the two blocks appear back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 0, using the sub-tensor path.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
6088
6089LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6090 armnn::IWorkloadFactory& workloadFactory,
6091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6092{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006093 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6094 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006095}
6096
// Concatenates two inputs with different channel counts — { 1, 3, 2, 2 } and
// { 1, 2, 2, 2 } — along dimension 1 (channels) into a { 1, 5, 2, 2 } output
// and verifies that the two blocks appear back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 1, using the sub-tensor path.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
6157
6158LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6159 armnn::IWorkloadFactory& workloadFactory,
6160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6161{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006162 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6163 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006164}
6165
// Concatenates two inputs with different heights — { 1, 3, 2, 2 } and
// { 1, 3, 3, 2 } — along dimension 2 (height) into a { 1, 3, 5, 2 } output
// and verifies the per-channel row interleaving.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2, using the sub-tensor path.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Per channel: 2 rows from input0 followed by 3 rows from input1.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
6237
6238LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6239 armnn::IWorkloadFactory& workloadFactory,
6240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6241{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006242 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6243 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006244}
6245
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006246template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006247LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
6248 armnn::IWorkloadFactory& workloadFactory,
6249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6250 float qScale,
6251 int32_t qOffset,
6252 bool useSubtensor)
6253{
6254 unsigned int dimension = 3;
Jim Flynncbb66aa2019-05-15 13:03:54 +01006255 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006256
6257 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
6258 1.0f, 2.0f,
6259 3.0f, 4.0f,
6260 5.0f, 6.0f,
6261 7.0f, 8.0f,
6262 9.0f, 10.0f,
6263 11.0f, 12.0f
6264 }));
6265
Jim Flynncbb66aa2019-05-15 13:03:54 +01006266 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006267
6268 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
6269 11.0f, 12.0f, 13.0f,
6270 14.0f, 15.0f, 16.0f,
6271
6272 17.0f, 18.0f, 19.0f,
6273 20.0f, 21.0f, 22.0f,
6274
6275 23.0f, 24.0f, 25.0f,
6276 26.0f, 27.0f, 28.0f
6277 }));
6278
Jim Flynncbb66aa2019-05-15 13:03:54 +01006279 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006280
6281 LayerTestResult<T, 4> result(outputTensorInfo);
6282
6283 std::vector<T> output;
6284 output.resize(outputTensorInfo.GetNumElements());
6285 Concatenate<T>(workloadFactory,
6286 memoryManager,
6287 {inputTensorInfo0, inputTensorInfo1},
6288 {input0.data(), input1.data()},
6289 outputTensorInfo,
6290 output.data(),
6291 dimension,
6292 useSubtensor);
6293
6294 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6295 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6296 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
6297 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
6298 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
6299 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
6300 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
6301 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
6302 }));
6303
6304 return result;
6305}
6306
6307LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6308 armnn::IWorkloadFactory& workloadFactory,
6309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6310 bool useSubtensor)
6311{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006312 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6313 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006314}
6315
// Runs a FakeQuantization workload over a 3x2 Float32 tensor and checks the
// result. With m_Min = -10 and m_Max = 10 the expected values show the input
// range being mapped onto [0, 255] (e.g. -10 -> 0, 0 -> 128, 10 -> 255).
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({height, width },
        armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f, -5.0f,
         0.0f, 5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    // Quantization range handed to the workload via the descriptor parameters.
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    // NOTE(review): refHandle/refData/refInfo are prepared but no workload is
    // ever created from them below — this looks like leftover scaffolding from
    // a reference-comparison test; confirm before removing.
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    // Expected values are filled in only after execution; the earlier refHandle
    // points at this tensor's storage, which is written in place here.
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}
6372
namespace
{
// Shared implementation for the L2Normalization layer tests.
//
// Input/expected values are always supplied in NCHW order; when 'layout' is
// NHWC they are permuted before use so the same reference data serves both
// layouts. Input and output share 'inputOutputTensorShape' but may carry
// different quantization parameters (scale/offset vs outScale/outOffset).
// 'epsilon' is forwarded to the workload's m_Eps parameter.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize using the input tensor's own scale/offset.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // The expected output gets the same layout permutation as the input
    // (input and output share the same shape).
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / sqrt(sum of squares) of the given elements — the factor each
// element is multiplied by under L2 normalization.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
6456
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006457template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006458LayerTestResult<T, 2> Pad2dTestCommon(
6459 armnn::IWorkloadFactory& workloadFactory,
6460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6461 float qScale,
David Monahan34757812019-06-19 11:47:21 +01006462 int32_t qOffset,
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006463 const float customPaddingValue)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006464{
Derek Lambertif30f7d32019-04-09 10:25:02 +01006465 const armnn::TensorShape inputShape{ 3, 3 };
6466 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006467
David Monahan34757812019-06-19 11:47:21 +01006468 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6469 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006470
Derek Lambertif30f7d32019-04-09 10:25:02 +01006471 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006472 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006473 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006474 // Height (3) x Width (3)
6475 4, 8, 6,
6476 7, 4, 4,
6477 3, 2, 4
6478 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006479
Teresa Charlinec8e1982019-07-02 16:24:09 +01006480 auto p = customPaddingValue;
David Monahan34757812019-06-19 11:47:21 +01006481 std::vector<T> expectedOutputValues;
Teresa Charlinec8e1982019-07-02 16:24:09 +01006482 expectedOutputValues = (
6483 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006484 {
Teresa Charlinec8e1982019-07-02 16:24:09 +01006485 p, p, p, p, p, p, p,
6486 p, p, p, p, p, p, p,
6487 p, p, 4, 8, 6, p, p,
6488 p, p, 7, 4, 4, p, p,
6489 p, p, 3, 2, 4, p, p,
6490 p, p, p, p, p, p, p,
6491 p, p, p, p, p, p, p
6492 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006493
Derek Lambertif30f7d32019-04-09 10:25:02 +01006494 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006495
Derek Lambertif30f7d32019-04-09 10:25:02 +01006496 LayerTestResult<T, 2> result(outputTensorInfo);
6497 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006498
Derek Lambertif30f7d32019-04-09 10:25:02 +01006499 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6500 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006501
Derek Lambertif30f7d32019-04-09 10:25:02 +01006502 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006503
Teresa Charlinec8e1982019-07-02 16:24:09 +01006504 std::vector<std::pair<unsigned int, unsigned int>> padList;
6505 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6506 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006507
Teresa Charlinec8e1982019-07-02 16:24:09 +01006508 descriptor.m_Parameters.m_PadList = padList;
6509 descriptor.m_Parameters.m_PadValue = customPaddingValue;
Derek Lambertif30f7d32019-04-09 10:25:02 +01006510 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006511
Derek Lambertif30f7d32019-04-09 10:25:02 +01006512 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6513 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006514
Derek Lambertif30f7d32019-04-09 10:25:02 +01006515 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006516
Derek Lambertif30f7d32019-04-09 10:25:02 +01006517 inputHandle->Allocate();
6518 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006519
Derek Lambertif30f7d32019-04-09 10:25:02 +01006520 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006521
Derek Lambertif30f7d32019-04-09 10:25:02 +01006522 workload->PostAllocationConfigure();
6523 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006524
Derek Lambertif30f7d32019-04-09 10:25:02 +01006525 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006526
Derek Lambertif30f7d32019-04-09 10:25:02 +01006527 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006528}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006529
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006530template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006531LayerTestResult<T, 3> Pad3dTestCommon(
6532 armnn::IWorkloadFactory& workloadFactory,
6533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6534 float qScale,
6535 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006536{
6537 const armnn::TensorShape inputShape{ 2, 2, 2 };
6538 const armnn::TensorShape outputShape{ 3, 5, 6 };
6539
David Monahan34757812019-06-19 11:47:21 +01006540 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6541 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006542
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006543 std::vector<T> inputValues(
6544 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006545 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006546 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006547 0, 4,
6548 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006549
6550 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006551 6, 1,
6552 5, 2
6553 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006554
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006555 std::vector<T> expectedOutputValues(
6556 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006557 {
6558
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006559 0, 0, 0, 0, 0, 0,
6560 0, 0, 0, 0, 0, 0,
6561 0, 0, 0, 4, 0, 0,
6562 0, 0, 2, 5, 0, 0,
6563 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006564
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006565 0, 0, 0, 0, 0, 0,
6566 0, 0, 0, 0, 0, 0,
6567 0, 0, 6, 1, 0, 0,
6568 0, 0, 5, 2, 0, 0,
6569 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006570
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006571 0, 0, 0, 0, 0, 0,
6572 0, 0, 0, 0, 0, 0,
6573 0, 0, 0, 0, 0, 0,
6574 0, 0, 0, 0, 0, 0,
6575 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006576
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006577 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006578
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006579 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006580
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006581 LayerTestResult<T, 3> result(outputTensorInfo);
6582 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006583
6584 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6585 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6586
6587 armnn::PadQueueDescriptor descriptor;
6588
6589 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6590 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
6591 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6592 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6593
6594 descriptor.m_Parameters.m_PadList = PadList;
6595 armnn::WorkloadInfo info;
6596
6597 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6598 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6599
6600 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6601
6602 inputHandle->Allocate();
6603 outputHandle->Allocate();
6604
6605 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
6606
Derek Lambertif30f7d32019-04-09 10:25:02 +01006607 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006608 workload->Execute();
6609
6610 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
6611
6612 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006613}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006614
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006615template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006616LayerTestResult<T, 4> Pad4dTestCommon(
6617 armnn::IWorkloadFactory& workloadFactory,
6618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6619 float qScale,
6620 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006621{
6622 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6623 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6624
David Monahan34757812019-06-19 11:47:21 +01006625 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6626 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006627
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006628 std::vector<T> inputValues(
6629 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006630 {
6631 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006632 0, 1,
6633 2, 3,
6634 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006635
6636 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006637 6, 7,
6638 8, 9,
6639 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006640
6641 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006642 12, 13,
6643 14, 15,
6644 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006645
6646 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006647 18, 19,
6648 20, 21,
6649 22, 23
6650 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006651
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006652 std::vector<T> expectedOutputValues(
6653 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006654 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006655 0, 0, 0, 0,
6656 0, 0, 0, 0,
6657 0, 0, 0, 0,
6658 0, 0, 0, 0,
6659 0, 0, 0, 0,
6660 0, 0, 0, 0,
6661 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006662
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006663 0, 0, 0, 0,
6664 0, 0, 0, 0,
6665 0, 0, 0, 0,
6666 0, 0, 0, 0,
6667 0, 0, 0, 0,
6668 0, 0, 0, 0,
6669 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006670
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006671 0, 0, 0, 0,
6672 0, 0, 0, 0,
6673 0, 0, 0, 0,
6674 0, 0, 0, 0,
6675 0, 0, 0, 0,
6676 0, 0, 0, 0,
6677 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006678
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006679 0, 0, 0, 0,
6680 0, 0, 0, 0,
6681 0, 0, 0, 0,
6682 0, 0, 0, 0,
6683 0, 0, 0, 0,
6684 0, 0, 0, 0,
6685 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006686
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006687 0, 0, 0, 0,
6688 0, 0, 0, 0,
6689 0, 0, 0, 0,
6690 0, 0, 0, 0,
6691 0, 0, 0, 0,
6692 0, 0, 0, 0,
6693 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006694
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006695 0, 0, 0, 0,
6696 0, 0, 0, 0,
6697 0, 0, 0, 0,
6698 0, 0, 0, 0,
6699 0, 0, 0, 0,
6700 0, 0, 0, 0,
6701 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006702
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006703 0, 0, 0, 0,
6704 0, 0, 0, 0,
6705 0, 0, 0, 0,
6706 0, 0, 0, 0,
6707 0, 0, 0, 0,
6708 0, 0, 0, 0,
6709 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006710
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006711 0, 0, 0, 0,
6712 0, 0, 0, 0,
6713 0, 0, 0, 0,
6714 0, 0, 1, 0,
6715 0, 2, 3, 0,
6716 0, 4, 5, 0,
6717 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006718
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006719 0, 0, 0, 0,
6720 0, 0, 0, 0,
6721 0, 0, 0, 0,
6722 0, 6, 7, 0,
6723 0, 8, 9, 0,
6724 0, 10, 11, 0,
6725 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006726
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006727 0, 0, 0, 0,
6728 0, 0, 0, 0,
6729 0, 0, 0, 0,
6730 0, 0, 0, 0,
6731 0, 0, 0, 0,
6732 0, 0, 0, 0,
6733 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006734
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006735 0, 0, 0, 0,
6736 0, 0, 0, 0,
6737 0, 0, 0, 0,
6738 0, 0, 0, 0,
6739 0, 0, 0, 0,
6740 0, 0, 0, 0,
6741 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006742
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006743 0, 0, 0, 0,
6744 0, 0, 0, 0,
6745 0, 0, 0, 0,
6746 0, 0, 0, 0,
6747 0, 0, 0, 0,
6748 0, 0, 0, 0,
6749 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006750
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006751 0, 0, 0, 0,
6752 0, 0, 0, 0,
6753 0, 0, 0, 0,
6754 0, 12, 13, 0,
6755 0, 14, 15, 0,
6756 0, 16, 17, 0,
6757 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006758
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006759 0, 0, 0, 0,
6760 0, 0, 0, 0,
6761 0, 0, 0, 0,
6762 0, 18, 19, 0,
6763 0, 20, 21, 0,
6764 0, 22, 23, 0,
6765 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006766
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006767 0, 0, 0, 0,
6768 0, 0, 0, 0,
6769 0, 0, 0, 0,
6770 0, 0, 0, 0,
6771 0, 0, 0, 0,
6772 0, 0, 0, 0,
6773 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006774
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006775 0, 0, 0, 0,
6776 0, 0, 0, 0,
6777 0, 0, 0, 0,
6778 0, 0, 0, 0,
6779 0, 0, 0, 0,
6780 0, 0, 0, 0,
6781 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006782
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006783 0, 0, 0, 0,
6784 0, 0, 0, 0,
6785 0, 0, 0, 0,
6786 0, 0, 0, 0,
6787 0, 0, 0, 0,
6788 0, 0, 0, 0,
6789 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006790
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006791 0, 0, 0, 0,
6792 0, 0, 0, 0,
6793 0, 0, 0, 0,
6794 0, 0, 0, 0,
6795 0, 0, 0, 0,
6796 0, 0, 0, 0,
6797 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006798
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006799 0, 0, 0, 0,
6800 0, 0, 0, 0,
6801 0, 0, 0, 0,
6802 0, 0, 0, 0,
6803 0, 0, 0, 0,
6804 0, 0, 0, 0,
6805 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006806
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006807 0, 0, 0, 0,
6808 0, 0, 0, 0,
6809 0, 0, 0, 0,
6810 0, 0, 0, 0,
6811 0, 0, 0, 0,
6812 0, 0, 0, 0,
6813 0, 0, 0, 0
6814 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006815
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006816 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006817
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006818 LayerTestResult<T, 4> result(outputTensorInfo);
6819 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006820
6821 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6822 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6823
6824 armnn::PadQueueDescriptor descriptor;
6825
6826 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6827 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6828 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6829 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6830 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6831
6832 descriptor.m_Parameters.m_PadList = PadList;
6833 armnn::WorkloadInfo info;
6834
6835 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6836 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6837
6838 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6839
6840 inputHandle->Allocate();
6841 outputHandle->Allocate();
6842
6843 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6844
Derek Lambertif30f7d32019-04-09 10:25:02 +01006845 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006846 workload->Execute();
6847
6848 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6849
6850 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006851}
6852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006853LayerTestResult<uint8_t, 2> PadUint82dTest(
6854 armnn::IWorkloadFactory& workloadFactory,
6855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006856{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006857 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006858}
6859
David Monahan34757812019-06-19 11:47:21 +01006860LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6861 armnn::IWorkloadFactory& workloadFactory,
6862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6863{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006864 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006865}
6866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006867LayerTestResult<uint8_t, 3> PadUint83dTest(
6868 armnn::IWorkloadFactory& workloadFactory,
6869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006870{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006871 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006872}
6873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006874LayerTestResult<uint8_t, 4> PadUint84dTest(
6875 armnn::IWorkloadFactory& workloadFactory,
6876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006877{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006878 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006879}
6880

// Explicit instantiations of the Pad*TestCommon templates for QuantisedSymm16.
// The template definitions live only in this .cpp, so these make the symbols
// available to code in other translation units — presumably the backend test
// suites; confirm against the callers before removing any of them.
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

6903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006904LayerTestResult<float, 2> PadFloat322dTest(
6905 armnn::IWorkloadFactory& workloadFactory,
6906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006907{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006908 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006909}
6910
David Monahan34757812019-06-19 11:47:21 +01006911LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6912 armnn::IWorkloadFactory& workloadFactory,
6913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6914{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006915 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006916}
6917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006918LayerTestResult<float, 3> PadFloat323dTest(
6919 armnn::IWorkloadFactory& workloadFactory,
6920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006921{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006922 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006923}
6924
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006925LayerTestResult<float, 4> PadFloat324dTest(
6926 armnn::IWorkloadFactory& workloadFactory,
6927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006928{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006929 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006930}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006931
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006932template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006933LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6934 armnn::IWorkloadFactory& workloadFactory,
6935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6936 float scale,
6937 int32_t offset,
6938 float outScale,
6939 int32_t outOffset,
6940 const armnn::DataLayout layout,
6941 float epsilon)
6942{
6943 // Width: 1
6944 // Height: 1
6945 // Channels: 3
6946 // BatchSize: 1
6947 unsigned int numberOfBatches = 1;
6948 unsigned int numberOfChannels = 3;
6949 unsigned int height = 1;
6950 unsigned int width = 1;
6951
6952 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6953 numberOfBatches, numberOfChannels, height, width, layout);
6954
6955 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6956 std::vector<float> inputValues
6957 {
6958 // Batch 0, Channel 0, Height (1) x Width (1)
6959 0.00000001f,
6960
6961 // Batch 0, Channel 1, Height (1) x Width (1)
6962 0.00000002f,
6963
6964 // Batch 0, Channel 2, Height (1) x Width (1)
6965 0.00000003f,
6966 };
6967
6968 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6969 std::vector<float> expectedOutputValues
6970 {
6971 // Batch 0, Channel 0, Height (1) x Width (1)
6972 0.00000001f * approxInvL2Norm,
6973 0.00000002f * approxInvL2Norm,
6974 0.00000003f * approxInvL2Norm,
6975 };
6976
6977 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6978 inputValues, outScale, outOffset, expectedOutputValues, layout,
6979 epsilon);
6980}
6981
6982
6983template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006984LayerTestResult<T, 4> L2Normalization1dTestCommon(
6985 armnn::IWorkloadFactory& workloadFactory,
6986 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006987 float scale,
6988 int32_t offset,
6989 float outScale,
6990 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006991 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006992{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006993 // Width: 1
6994 // Height: 1
6995 // Channels: 10
6996 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006997 unsigned int numberOfBatches = 1;
6998 unsigned int numberOfChannels = 10;
6999 unsigned int height = 1;
7000 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00007001
jimfly013aab7c32018-11-12 13:32:08 +00007002
Nina Drozdd41b2592018-11-19 13:03:36 +00007003 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007004 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007005 std::vector<float> inputValues
7006 {
7007 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007008 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00007009
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007010 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007011 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00007012
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007013 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007014 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00007015
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007016 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007017 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007018
7019 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007020 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007021
7022 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007023 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007024
7025 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007026 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007027
7028 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007029 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007030
7031 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007032 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007033
7034 // Batch 0, Channel 9, Height (1) x Width (1)
7035 10.0f
7036 };
telsoa014fcda012018-03-09 14:13:49 +00007037 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007038 std::vector<float> expectedOutputValues
7039 {
7040 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007041 1.0f * approxInvL2Norm,
7042 2.0f * approxInvL2Norm,
7043 3.0f * approxInvL2Norm,
7044 4.0f * approxInvL2Norm,
7045 5.0f * approxInvL2Norm,
7046 6.0f * approxInvL2Norm,
7047 7.0f * approxInvL2Norm,
7048 8.0f * approxInvL2Norm,
7049 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00007050 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007051 };
telsoa014fcda012018-03-09 14:13:49 +00007052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007053
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007054 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7055 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00007056}
7057
Ferran Balaguere52211e2019-06-17 12:23:52 +01007058LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7059 armnn::IWorkloadFactory& workloadFactory,
7060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7061 const armnn::DataLayout layout)
7062{
7063 // Dummy descriptor to get the default value of epsilon.
7064 armnn::L2NormalizationDescriptor descriptor;
7065
7066 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7067 layout, descriptor.m_Eps);
7068}
7069
7070LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7071 armnn::IWorkloadFactory& workloadFactory,
7072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7073 const armnn::DataLayout layout)
7074{
7075 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7076 layout, 1e-9f);
7077}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007078
7079LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007080 armnn::IWorkloadFactory& workloadFactory,
7081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007082 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007083{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007084 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007085}
7086
7087LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7088 armnn::IWorkloadFactory& workloadFactory,
7089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7090 const armnn::DataLayout layout)
7091{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007092 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007093 layout);
7094}
7095
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007096LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7097 armnn::IWorkloadFactory& workloadFactory,
7098 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7099 const armnn::DataLayout layout)
7100{
7101 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7102 1.f/128, 128, layout);
7103}
7104
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007105template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7106LayerTestResult<T, 4> L2Normalization2dTestCommon(
7107 armnn::IWorkloadFactory& workloadFactory,
7108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007109 float scale,
7110 int32_t offset,
7111 float outScale,
7112 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007113 const armnn::DataLayout layout)
7114{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007115 // Width: 5
7116 // Height: 1
7117 // Channels: 2
7118 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007119 unsigned int numberOfBatches = 1;
7120 unsigned int numberOfChannels = 2;
7121 unsigned int height = 1;
7122 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00007123
Nina Drozdd41b2592018-11-19 13:03:36 +00007124 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007125 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007126 std::vector<float> inputValues
7127 {
7128 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00007129 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00007130
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007131 // Batch 0, Channel 1, Height (1) x Width (5)
7132 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7133 };
7134 std::vector<float> expectedOutputValues
7135 {
7136 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007137 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7138 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7139 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7140 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
7141 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007142
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007143 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007144 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7145 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7146 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7147 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007148 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007149 };
telsoa014fcda012018-03-09 14:13:49 +00007150
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007151 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7152 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007153}
telsoa014fcda012018-03-09 14:13:49 +00007154
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007155LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007156 armnn::IWorkloadFactory& workloadFactory,
7157 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007158 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007159{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007160 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7161 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007162}
7163
7164LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7165 armnn::IWorkloadFactory& workloadFactory,
7166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7167 const armnn::DataLayout layout)
7168{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007169 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007170 layout);
7171}
7172
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007173LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7174 armnn::IWorkloadFactory& workloadFactory,
7175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7176 const armnn::DataLayout layout)
7177{
7178 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7179 1.f/128, 128, layout);
7180}
7181
// Shared implementation for the 3d L2 normalization tests: a 1x2x4x3 (N,C,H,W)
// tensor. Each expected output is the input element divided by the L2 norm of
// the two channel values sharing its spatial position (via CalcInvL2Norm).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,       // input quantisation scale
    int32_t offset,    // input quantisation offset
    float outScale,    // output quantisation scale
    int32_t outOffset, // output quantisation offset
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape arranges N/C/H/W according to the requested data layout.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f,  21.0f, 150.0f,
        149.0f,  32.0f, 179.0f,
         15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f,  73.0f,
        211.0f, 212.0f,  89.0f,
         24.0f, 138.0f, 188.0f,
        162.0f,  12.0f, 161.0f
    };
    // Each pair passed to CalcInvL2Norm is (channel-0 value, channel-1 value)
    // for one spatial position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00007251
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007252LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007253 armnn::IWorkloadFactory& workloadFactory,
7254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007255 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007256{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007257 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7258 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007259}
7260
7261LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7262 armnn::IWorkloadFactory& workloadFactory,
7263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7264 const armnn::DataLayout layout)
7265{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007266 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007267 layout);
7268}
7269
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007270LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7271 armnn::IWorkloadFactory& workloadFactory,
7272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7273 const armnn::DataLayout layout)
7274{
7275 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7276 1.f/128, 128, layout);
7277}
7278
// Shared implementation for the 4d L2 normalization tests: a 2x3x4x3 (N,C,H,W)
// tensor. Each expected output is the input element divided by the L2 norm of
// the three channel values sharing its batch and spatial position (via
// CalcInvL2Norm).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,       // input quantisation scale
    int32_t offset,    // input quantisation offset
    float outScale,    // output quantisation scale
    int32_t outOffset, // output quantisation offset
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape arranges N/C/H/W according to the requested data layout.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f
    };
    // Each triple passed to CalcInvL2Norm is (channel-0, channel-1, channel-2)
    // for one batch/spatial position, so the three channel blocks of a batch
    // share the same sequence of norms.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
7428
7429LayerTestResult<float, 4> L2Normalization4dTest(
7430 armnn::IWorkloadFactory& workloadFactory,
7431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7432 const armnn::DataLayout layout)
7433{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007434 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7435 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007436}
7437
7438LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7439 armnn::IWorkloadFactory& workloadFactory,
7440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7441 const armnn::DataLayout layout)
7442{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007443 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007444 layout);
telsoa014fcda012018-03-09 14:13:49 +00007445}
7446
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007447LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7448 armnn::IWorkloadFactory& workloadFactory,
7449 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7450 const armnn::DataLayout layout)
7451{
7452 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7453 1.f/128, 128, layout);
7454}
7455
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007456template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007457LayerTestResult<T, 4> ConstantTestImpl(
7458 armnn::IWorkloadFactory& workloadFactory,
7459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007460 float qScale,
7461 int32_t qOffset)
7462{
7463 constexpr unsigned int inputWidth = 3;
7464 constexpr unsigned int inputHeight = 4;
7465 constexpr unsigned int inputChannels = 3;
7466 constexpr unsigned int inputBatchSize = 2;
7467
7468 constexpr unsigned int outputWidth = inputWidth;
7469 constexpr unsigned int outputHeight = inputHeight;
7470 constexpr unsigned int outputChannels = inputChannels;
7471 constexpr unsigned int outputBatchSize = inputBatchSize;
7472
Nina Drozd58ef2c62019-05-16 12:09:18 +01007473 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7474 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007475
Nina Drozd58ef2c62019-05-16 12:09:18 +01007476 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7477 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007478
7479 // Set quantization parameters if the requested type is a quantized type.
7480 if(armnn::IsQuantizedType<T>())
7481 {
7482 inputTensorInfo.SetQuantizationScale(qScale);
7483 inputTensorInfo.SetQuantizationOffset(qOffset);
7484 outputTensorInfo.SetQuantizationScale(qScale);
7485 outputTensorInfo.SetQuantizationOffset(qOffset);
7486 }
7487
7488 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7489 QuantizedVector<T>(qScale, qOffset, {
7490 // Batch 0, Channel 0
7491 235.0f, 46.0f, 178.0f,
7492 100.0f, 123.0f, 19.0f,
7493 172.0f, 74.0f, 250.0f,
7494 6.0f, 195.0f, 80.0f,
7495
7496 // Batch 0, Channel 1
7497 113.0f, 95.0f, 202.0f,
7498 77.0f, 114.0f, 71.0f,
7499 122.0f, 246.0f, 166.0f,
7500 82.0f, 28.0f, 37.0f,
7501
7502 // Batch 0, Channel 2
7503 56.0f, 170.0f, 162.0f,
7504 194.0f, 89.0f, 254.0f,
7505 12.0f, 209.0f, 200.0f,
7506 1.0f, 64.0f, 54.0f,
7507
7508 // Batch 1, Channel 0
7509 67.0f, 90.0f, 49.0f,
7510 7.0f, 163.0f, 18.0f,
7511 25.0f, 117.0f, 103.0f,
7512 247.0f, 59.0f, 189.0f,
7513
7514 // Batch 1, Channel 1
7515 239.0f, 104.0f, 199.0f,
7516 17.0f, 124.0f, 153.0f,
7517 222.0f, 217.0f, 75.0f,
7518 32.0f, 126.0f, 21.0f,
7519
7520 // Batch 1, Channel 2
7521 97.0f, 145.0f, 215.0f,
7522 115.0f, 116.0f, 238.0f,
7523 226.0f, 16.0f, 132.0f,
7524 92.0f, 125.0f, 88.0f,
7525 })));
7526
7527 LayerTestResult<T, 4> result(outputTensorInfo);
7528 result.outputExpected = input;
7529
7530 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7531
7532 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7533 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7534
7535 armnn::ConstantQueueDescriptor descriptor;
7536 descriptor.m_LayerOutput = &constantTensor;
7537
7538 armnn::WorkloadInfo info;
7539 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7540
7541 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7542
7543 outputHandle->Allocate();
7544
Derek Lambertif30f7d32019-04-09 10:25:02 +01007545 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007546 workload->Execute();
7547
7548 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7549 return result;
7550}
7551
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007552LayerTestResult<float, 4> ConstantTest(
7553 armnn::IWorkloadFactory& workloadFactory,
7554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007555{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007556 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007557}
7558
Nina Drozd58ef2c62019-05-16 12:09:18 +01007559LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7560 armnn::IWorkloadFactory& workloadFactory,
7561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7562{
7563 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7564}
7565
7566LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007567 armnn::IWorkloadFactory& workloadFactory,
7568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007569{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007570 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007571}
7572
Jim Flynn4ed6c832019-05-20 11:02:46 +01007573LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007574 armnn::IWorkloadFactory& workloadFactory,
7575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7576{
7577 unsigned int outputWidth = 3;
7578 unsigned int outputHeight = 6;
7579 unsigned int outputChannels = 3;
7580
7581 unsigned int inputWidth1 = 3;
7582 unsigned int inputHeight1 = 6;
7583 unsigned int inputChannels1 = 2;
7584
7585 unsigned int inputWidth2 = 3;
7586 unsigned int inputHeight2 = 6;
7587 unsigned int inputChannels2 = 1;
7588
7589 // Defines the tensor descriptors.
7590 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7591 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7592 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7593
7594 // Quantized input1 tensor. Range [-3, 1]
7595 const float inputScale1 = 0.015686f;
7596 const int32_t inputOffset1 = 192;
7597
7598 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7599 {
7600 1, 2, 3,
7601 4, 5, 6,
7602 7, 8, 9,
7603 10, 11, 12,
7604 13, 14, 15,
7605 16, 17, 18,
7606
7607 19, 20, 21,
7608 22, 23, 24,
7609 25, 26, 27,
7610 28, 29, 30,
7611 31, 32, 33,
7612 34, 35, 36,
7613 })
7614 );
7615
7616 // Quatized input2 tensor. Range [-1, 4]
7617 const float inputScale2 = 0.019608f;
7618 const int32_t inputOffset2 = 50;
7619
7620 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7621 {
7622 37, 38, 39,
7623 40, 41, 42,
7624 43, 44, 45,
7625 46, 47, 48,
7626 49, 50, 51,
7627 52, 53, 54,
7628 })
7629 );
7630
7631 // Output has the same quantization parameters than input1,
7632 // so that only the requantization of input2 is required
7633 const float outputScale = 0.015686f;
7634 const int32_t outputOffset = 192;
7635
7636 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7637
7638 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7639 {
7640 1, 2, 3,
7641 4, 5, 6,
7642 7, 8, 9,
7643 10, 11, 12,
7644 13, 14, 15,
7645 16, 17, 18,
7646
7647 19, 20, 21,
7648 22, 23, 24,
7649 25, 26, 27,
7650 28, 29, 30,
7651 31, 32, 33,
7652 34, 35, 36,
7653
7654 176, 177, 178,
7655 179, 181, 182,
7656 183, 184, 186,
7657 187, 188, 189,
7658 191, 192, 193,
7659 195, 196, 197,
7660 })
7661 );
7662
7663 outputTensorInfo.SetQuantizationScale(outputScale);
7664 outputTensorInfo.SetQuantizationOffset(outputOffset);
7665 inputTensorInfo1.SetQuantizationScale(inputScale1);
7666 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7667 inputTensorInfo2.SetQuantizationScale(inputScale2);
7668 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7669
7670 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007671 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007672
7673 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007674 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007675
7676 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7677
7678 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7679
7680 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7681 subTensorsSupported ?
7682 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7683 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7684
7685 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7686 subTensorsSupported ?
7687 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7688 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7689
Jim Flynne242f2d2019-05-22 14:24:13 +01007690 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007691 armnn::WorkloadInfo info;
7692 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7693 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7694 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7695
7696 data.m_ViewOrigins.push_back(window1);
7697 data.m_ViewOrigins.push_back(window2);
7698
Jim Flynn4ed6c832019-05-20 11:02:46 +01007699 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007700
7701 inputHandle1->Allocate();
7702 inputHandle2->Allocate();
7703 outputHandle->Allocate();
7704
7705 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7706 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7707
Derek Lambertif30f7d32019-04-09 10:25:02 +01007708 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007709 workload->Execute();
7710
7711 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7712
7713 return ret;
7714}
7715
Jim Flynn4ed6c832019-05-20 11:02:46 +01007716LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007717 armnn::IWorkloadFactory& workloadFactory,
7718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007719{
surmeh013537c2c2018-05-18 16:31:43 +01007720 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007721 unsigned int outputHeight = 6;
7722 unsigned int outputChannels = 3;
7723
surmeh013537c2c2018-05-18 16:31:43 +01007724 unsigned int inputWidth1 = 3;
7725 unsigned int inputHeight1 = 6;
7726 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007727
surmeh013537c2c2018-05-18 16:31:43 +01007728 unsigned int inputWidth2 = 3;
7729 unsigned int inputHeight2 = 6;
7730 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007731
telsoa01c577f2c2018-08-31 09:22:23 +01007732 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007733 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7734 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7735 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007736
Jim Flynn4ed6c832019-05-20 11:02:46 +01007737 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007738 const float scale = 0.13497836f;
7739 const int32_t offset = -7;
7740
7741 outputTensorInfo.SetQuantizationScale(scale);
7742 outputTensorInfo.SetQuantizationOffset(offset);
7743 inputTensorInfo1.SetQuantizationScale(scale);
7744 inputTensorInfo1.SetQuantizationOffset(offset);
7745 inputTensorInfo2.SetQuantizationScale(scale);
7746 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007747
7748 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7749
7750 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007751 {
7752 1, 2, 3,
7753 4, 5, 6,
7754 7, 8, 9,
7755 10, 11, 12,
7756 13, 14, 15,
7757 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007758
surmeh013537c2c2018-05-18 16:31:43 +01007759 19, 20, 21,
7760 22, 23, 24,
7761 25, 26, 27,
7762 28, 29, 30,
7763 31, 32, 33,
7764 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007765
surmeh013537c2c2018-05-18 16:31:43 +01007766 37, 38, 39,
7767 40, 41, 42,
7768 43, 44, 45,
7769 46, 47, 48,
7770 49, 50, 51,
7771 52, 53, 54,
7772 })
telsoa014fcda012018-03-09 14:13:49 +00007773 );
7774
telsoa014fcda012018-03-09 14:13:49 +00007775 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7776 {
surmeh013537c2c2018-05-18 16:31:43 +01007777 1, 2, 3,
7778 4, 5, 6,
7779 7, 8, 9,
7780 10, 11, 12,
7781 13, 14, 15,
7782 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007783
surmeh013537c2c2018-05-18 16:31:43 +01007784 19, 20, 21,
7785 22, 23, 24,
7786 25, 26, 27,
7787 28, 29, 30,
7788 31, 32, 33,
7789 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007790 })
7791 );
7792
7793 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7794 {
surmeh013537c2c2018-05-18 16:31:43 +01007795 37, 38, 39,
7796 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00007797 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01007798 46, 47, 48,
7799 49, 50, 51,
7800 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00007801 })
7802 );
7803
telsoa01c577f2c2018-08-31 09:22:23 +01007804 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007805 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00007806
telsoa01c577f2c2018-08-31 09:22:23 +01007807 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007808 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00007809
telsoa014fcda012018-03-09 14:13:49 +00007810
7811 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7812
7813 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7814
7815 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7816 subTensorsSupported ?
7817 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7818 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7819
7820 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7821 subTensorsSupported ?
7822 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7823 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7824
telsoa014fcda012018-03-09 14:13:49 +00007825
Jim Flynne242f2d2019-05-22 14:24:13 +01007826 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00007827 armnn::WorkloadInfo info;
7828 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7829 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00007830 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7831
7832 data.m_ViewOrigins.push_back(window1);
7833 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00007834
Jim Flynn4ed6c832019-05-20 11:02:46 +01007835 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00007836
7837 inputHandle1->Allocate();
7838 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007839 outputHandle->Allocate();
7840
7841 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7842 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007843
Derek Lambertif30f7d32019-04-09 10:25:02 +01007844 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007845 workload->Execute();
7846
7847 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7848
7849 return ret;
7850}
7851
Jim Flynn4ed6c832019-05-20 11:02:46 +01007852LayerTestResult<uint16_t, 3> ConcatUint16Test(
Jim Flynncbb66aa2019-05-15 13:03:54 +01007853 armnn::IWorkloadFactory& workloadFactory,
7854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7855{
7856 unsigned int outputWidth = 3;
7857 unsigned int outputHeight = 6;
7858 unsigned int outputChannels = 3;
7859
7860 unsigned int inputWidth1 = 3;
7861 unsigned int inputHeight1 = 6;
7862 unsigned int inputChannels1 = 2;
7863
7864 unsigned int inputWidth2 = 3;
7865 unsigned int inputHeight2 = 6;
7866 unsigned int inputChannels2 = 1;
7867
7868 // Defines the tensor descriptors.
7869 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7870 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7871 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7872
Jim Flynn4ed6c832019-05-20 11:02:46 +01007873 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
Jim Flynncbb66aa2019-05-15 13:03:54 +01007874 const float scale = 0.13497836f;
7875 const int32_t offset = -7;
7876
7877 outputTensorInfo.SetQuantizationScale(scale);
7878 outputTensorInfo.SetQuantizationOffset(offset);
7879 inputTensorInfo1.SetQuantizationScale(scale);
7880 inputTensorInfo1.SetQuantizationOffset(offset);
7881 inputTensorInfo2.SetQuantizationScale(scale);
7882 inputTensorInfo2.SetQuantizationOffset(offset);
7883
7884 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7885
7886 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7887 {
7888 1, 2, 3,
7889 4, 5, 6,
7890 7, 8, 9,
7891 10, 11, 12,
7892 13, 14, 15,
7893 16, 17, 18,
7894
7895 19, 20, 21,
7896 22, 23, 24,
7897 25, 26, 27,
7898 28, 29, 30,
7899 31, 32, 33,
7900 34, 35, 36,
7901
7902 37, 38, 39,
7903 40, 41, 42,
7904 43, 44, 45,
7905 46, 47, 48,
7906 49, 50, 51,
7907 52, 53, 54,
7908 }));
7909
7910 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7911 {
7912 1, 2, 3,
7913 4, 5, 6,
7914 7, 8, 9,
7915 10, 11, 12,
7916 13, 14, 15,
7917 16, 17, 18,
7918
7919 19, 20, 21,
7920 22, 23, 24,
7921 25, 26, 27,
7922 28, 29, 30,
7923 31, 32, 33,
7924 34, 35, 36,
7925 }));
7926
7927 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
7928 {
7929 37, 38, 39,
7930 40, 41, 42,
7931 43, 44, 45,
7932 46, 47, 48,
7933 49, 50, 51,
7934 52, 53, 54,
7935 }));
7936
7937 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007938 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007939
7940 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007941 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007942
7943
7944 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7945
7946 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7947
7948 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7949 subTensorsSupported ?
7950 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7951 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7952
7953 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7954 subTensorsSupported ?
7955 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7956 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7957
7958
Jim Flynne242f2d2019-05-22 14:24:13 +01007959 armnn::ConcatQueueDescriptor data;
Jim Flynncbb66aa2019-05-15 13:03:54 +01007960 armnn::WorkloadInfo info;
7961 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7962 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7963 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7964
7965 data.m_ViewOrigins.push_back(window1);
7966 data.m_ViewOrigins.push_back(window2);
7967
Jim Flynn4ed6c832019-05-20 11:02:46 +01007968 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007969
7970 inputHandle1->Allocate();
7971 inputHandle2->Allocate();
7972 outputHandle->Allocate();
7973
7974 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7975 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7976
7977 workload->PostAllocationConfigure();
7978 workload->Execute();
7979
7980 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7981
7982 return ret;
7983}
telsoa014fcda012018-03-09 14:13:49 +00007984
surmeh01bceff2f2018-03-29 16:29:27 +01007985namespace
telsoa014fcda012018-03-09 14:13:49 +00007986{
Sadik Armagan2999a022019-04-09 14:20:12 +01007987template <typename T>
7988LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007989 armnn::IWorkloadFactory& workloadFactory,
7990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7991 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007992 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007993 float scale0,
7994 int32_t offset0,
7995 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007996 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007997 float scale1,
7998 int32_t offset1,
7999 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01008000 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008001 float outScale,
8002 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01008003{
Sadik Armagan2999a022019-04-09 14:20:12 +01008004 auto dataType = (std::is_same<T, uint8_t>::value ?
8005 armnn::DataType::QuantisedAsymm8 :
8006 armnn::DataType::QuantisedSymm16);
8007
8008 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
8009 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
8010 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00008011
surmeh01bceff2f2018-03-29 16:29:27 +01008012 inputTensorInfo0.SetQuantizationScale(scale0);
8013 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00008014
surmeh01bceff2f2018-03-29 16:29:27 +01008015 inputTensorInfo1.SetQuantizationScale(scale1);
8016 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00008017
surmeh01bceff2f2018-03-29 16:29:27 +01008018 outputTensorInfo.SetQuantizationScale(outScale);
8019 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00008020
Sadik Armagan2999a022019-04-09 14:20:12 +01008021 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8022 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00008023
Sadik Armagan2999a022019-04-09 14:20:12 +01008024 LayerTestResult<T, 4> result(outputTensorInfo);
8025 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8026
8027 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8028 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8029 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8030
8031 armnn::AdditionQueueDescriptor data;
8032 armnn::WorkloadInfo info;
8033 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
8034 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8035 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8036
8037 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
8038
8039 inputHandle0->Allocate();
8040 inputHandle1->Allocate();
8041 outputHandle->Allocate();
8042
8043 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8044 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8045
Derek Lambertif30f7d32019-04-09 10:25:02 +01008046 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01008047 workload->Execute();
8048
8049 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8050
8051 return result;
8052}
8053} // anonymous namespace
8054
8055LayerTestResult<uint8_t, 4> AdditionUint8Test(
8056 armnn::IWorkloadFactory& workloadFactory,
8057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8058{
8059 const unsigned int shape0[] = { 1, 2, 2, 3 };
8060 const unsigned int shape1[] = { 1, 2, 2, 3 };
8061
8062 std::vector<uint8_t> input0(
8063 {
8064 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
8065 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
8066 });
8067
8068 std::vector<uint8_t> input1(
8069 {
8070 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
8071 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
8072 });
8073
8074 std::vector<uint8_t> output(
8075 {
8076 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
8077 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8078 });
8079
8080 return AdditionQuantizeTestHelper(workloadFactory,
8081 memoryManager,
8082 shape0, input0, 7.0f, 3,
8083 shape1, input1, 7.0f, 3,
8084 shape0, output, 7.0f, 3);
8085}
8086
// Elementwise addition of two QSymm16 tensors of identical shape.
// All tensors use scale 7, offset 0, so dequantized value = q * 7 and the
// quantized output is simply the elementwise sum of the quantized inputs
// (no saturation occurs at these magnitudes for int16).
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
        {
            63,  35,  77,  70,  56, 112, // 441, 245, 539, 490, 392, 784
            203, 28, 252, 168, 245, 91   // 1421, 196, 1764, 1176, 1715, 637
        });

    std::vector<int16_t> input1(
        {
            21, 7, 175, 231, 175, 210,   // 147, 49, 1225, 1617, 1225, 1470
            126, 161, 63, 21, 105, 126   // 882, 1127, 441, 147, 735, 882
        });

    std::vector<int16_t> output(
        {
            84, 42, 252, 301, 231, 322,  // 588, 294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
8118
namespace
{
// Builds and runs a Multiplication workload on two quantized 4D tensors and
// compares the output against outValues.
// Unlike AdditionQuantizeTestHelper, the armnn data type is given explicitly
// via the ArmnnType template parameter; the shapes may differ to exercise
// broadcasting. The expected outValues must already be quantized with
// (outScale, outOffset).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8184
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008185LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8186 armnn::IWorkloadFactory& workloadFactory,
8187 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008188{
8189 unsigned int batchSize = 1;
8190 unsigned int channels = 2;
8191 unsigned int height = 2;
8192 unsigned int width = 3;
8193 const unsigned int shape[] = { batchSize, channels, height, width };
8194
telsoa01c577f2c2018-08-31 09:22:23 +01008195 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008196 std::vector<uint8_t> input0({
8197 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
8198 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
8199 });
8200
telsoa01c577f2c2018-08-31 09:22:23 +01008201 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008202 std::vector<uint8_t> input1({
8203 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8204 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
8205 });
8206
telsoa01c577f2c2018-08-31 09:22:23 +01008207 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008208 std::vector<uint8_t> output(
8209 {
8210 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8211 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
8212 });
8213
Sadik Armagan2999a022019-04-09 14:20:12 +01008214 // Scale/offset chosen to have output values out of range.
8215 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8216 memoryManager,
8217 shape,
8218 input0,
8219 4.0f,
8220 1,
8221 shape,
8222 input1,
8223 3.0f,
8224 -2,
8225 shape,
8226 output,
8227 1366.255f,
8228 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01008229}
8230
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008231LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8232 armnn::IWorkloadFactory& workloadFactory,
8233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008234{
8235 const unsigned int shape0[] = { 1, 2, 2, 3 };
8236 const unsigned int shape1[] = { 1, 1, 1, 1 };
8237
8238 std::vector<uint8_t> input0({
8239 1, 2, 3, 4, 5, 6,
8240 7, 8, 9, 10, 11, 12
8241 });
8242
8243 std::vector<uint8_t> input1({2});
8244
8245 std::vector<uint8_t> output({
8246 2, 4, 6, 8, 10, 12,
8247 14, 16, 18, 20, 22, 24
8248 });
8249
Sadik Armagan2999a022019-04-09 14:20:12 +01008250 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8251 memoryManager,
8252 shape0,
8253 input0,
8254 1.0f,
8255 0,
8256 shape1,
8257 input1,
8258 1.0f,
8259 0,
8260 shape0,
8261 output,
8262 1.0f,
8263 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008264}
8265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008266LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8267 armnn::IWorkloadFactory& workloadFactory,
8268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008269{
8270 const unsigned int shape0[] = { 1, 2, 2, 3 };
8271 const unsigned int shape1[] = { 1, 1, 1, 3 };
8272
8273 std::vector<uint8_t> input0({
8274 1, 2, 3, 4, 5, 6,
8275 7, 8, 9, 10, 11, 12
8276 });
8277
8278 std::vector<uint8_t> input1({1, 2, 3});
8279
8280 std::vector<uint8_t> output({
8281 1, 4, 9, 4, 10, 18,
8282 7, 16, 27, 10, 22, 36
8283 });
8284
Sadik Armagan2999a022019-04-09 14:20:12 +01008285 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8286 memoryManager,
8287 shape0,
8288 input0,
8289 1.0f,
8290 0,
8291 shape1,
8292 input1,
8293 1.0f,
8294 0,
8295 shape0,
8296 output,
8297 1.0f,
8298 0);
8299}
8300
8301LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8302 armnn::IWorkloadFactory& workloadFactory,
8303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8304{
8305 const unsigned int shape[] = { 1, 2, 2, 3 };
8306
8307 std::vector<int16_t> input0(
8308 {
8309 6, 7, 8, 9, 10, 11,
8310 12, 13, 14, 15, 16, 17
8311 });
8312
8313 std::vector<int16_t> input1(
8314 {
8315 1, 2, 3, 4, 5, 6,
8316 7, 8, 9, 10, 11, 12
8317 });
8318
8319 std::vector<int16_t> output(
8320 {
8321 6, 14, 24, 36, 50, 66,
8322 84, 104, 126, 150, 176, 204
8323 });
8324
8325 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8326 memoryManager,
8327 shape,
8328 input0,
8329 1.0f,
8330 0,
8331 shape,
8332 input1,
8333 1.0f,
8334 0,
8335 shape,
8336 output,
8337 1.0f,
8338 0);
8339}
8340
8341LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8342 armnn::IWorkloadFactory& workloadFactory,
8343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8344{
8345 const unsigned int shape0[] = { 1, 2, 2, 3 };
8346 const unsigned int shape1[] = { 1, 1, 1, 1 };
8347
8348 std::vector<int16_t> input0(
8349 {
8350 1, 2, 3, 4, 5, 6,
8351 7, 8, 9, 10, 11, 12
8352 });
8353
8354 std::vector<int16_t> input1({2});
8355
8356 std::vector<int16_t> output(
8357 {
8358 2, 4, 6, 8, 10, 12,
8359 14, 16, 18, 20, 22, 24
8360 });
8361
8362 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8363 memoryManager,
8364 shape0,
8365 input0,
8366 1.0f,
8367 0,
8368 shape1,
8369 input1,
8370 1.0f,
8371 0,
8372 shape0,
8373 output,
8374 1.0f,
8375 0);
8376}
8377
8378LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8379 armnn::IWorkloadFactory& workloadFactory,
8380 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8381{
8382 const unsigned int shape0[] = { 1, 2, 2, 3 };
8383 const unsigned int shape1[] = { 1, 1, 1, 3 };
8384
8385 std::vector<int16_t> input0(
8386 {
8387 1, 2, 3, 4, 5, 6,
8388 7, 8, 9, 10, 11, 12
8389 });
8390
8391 std::vector<int16_t> input1({1, 2, 3});
8392
8393 std::vector<int16_t> output(
8394 {
8395 1, 4, 9, 4, 10, 18,
8396 7, 16, 27, 10, 22, 36
8397 });
8398
8399 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8400 memoryManager,
8401 shape0,
8402 input0,
8403 1.0f,
8404 0,
8405 shape1,
8406 input1,
8407 1.0f,
8408 0,
8409 shape0,
8410 output,
8411 1.0f,
8412 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008413}
telsoa014fcda012018-03-09 14:13:49 +00008414
namespace
{
// Builds a single Subtraction workload on the given backend, runs it on the two
// quantized (or float) input tensors, and returns a LayerTestResult holding both
// the actual output and the caller-supplied expected output for later comparison.
//
// ArmnnType selects the data type of all three tensors; T is the matching C++
// element type. shapeN/valuesN/scaleN/offsetN describe the two inputs, and
// outShape/outValues/outScale/outOffset describe the expected output. All shapes
// are rank-4; the backend is expected to broadcast when shape0 != shape1.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Per-tensor quantization parameters; ignored by float backends but required
    // for the quantized data types.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // The expected output is stored up-front; result.output is filled in after execution.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensor handles into the queue descriptor / workload info pair.
    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Order matters: allocate backing memory, copy the inputs in, let the
    // workload finish its post-allocation setup, then execute.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8480
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008481LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8482 armnn::IWorkloadFactory& workloadFactory,
8483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008484{
8485 const unsigned int shape0[] = { 1, 1, 2, 2 };
8486 const unsigned int shape1[] = { 1, 1, 2, 2 };
8487
8488 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8489 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8490 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8491
Sadik Armagan2999a022019-04-09 14:20:12 +01008492 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8493 memoryManager,
8494 shape0, input0, 0.5f, 2,
8495 shape1, input1, 1.0f, 0,
8496 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008497}
8498
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008499LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8500 armnn::IWorkloadFactory& workloadFactory,
8501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008502{
8503 const unsigned int shape0[] = { 1, 1, 2, 2 };
8504 const unsigned int shape1[] = { 1, 1, 1, 1 };
8505
8506 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8507 std::vector<uint8_t> input1({ 2 });
8508 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8509
Sadik Armagan2999a022019-04-09 14:20:12 +01008510 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8511 memoryManager,
8512 shape0, input0, 0.5f, 2,
8513 shape1, input1, 1.0f, 0,
8514 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008515}
8516
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008517LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8518 armnn::IWorkloadFactory& workloadFactory,
8519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008520{
8521 const unsigned int shape0[] = { 1, 1, 2, 2 };
8522 const unsigned int shape1[] = { 1, 1, 2, 1 };
8523
8524 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8525 std::vector<uint8_t> input1({ 2, 1 });
8526 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8527
Sadik Armagan2999a022019-04-09 14:20:12 +01008528 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8529 memoryManager,
8530 shape0, input0, 1.0f, 0,
8531 shape1, input1, 1.0f, 0,
8532 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008533}
8534
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008535LayerTestResult<float, 4> SubtractionTest(
8536 armnn::IWorkloadFactory& workloadFactory,
8537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008538{
8539 const unsigned int shape0[] = { 1, 1, 2, 2 };
8540 const unsigned int shape1[] = { 1, 1, 2, 2 };
8541
8542 std::vector<float> input0({ 1, 2, 3, 4 });
8543 std::vector<float> input1({ 1, -1, 0, 2 });
8544 std::vector<float> output({ 0, 3, 3, 2 });
8545
Sadik Armagan2999a022019-04-09 14:20:12 +01008546 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8547 memoryManager,
8548 shape0, input0, 1.0f, 0,
8549 shape1, input1, 1.0f, 0,
8550 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008551}
8552
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008553LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8554 armnn::IWorkloadFactory& workloadFactory,
8555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008556{
8557 const unsigned int shape0[] = { 1, 1, 2, 2 };
8558 const unsigned int shape1[] = { 1, 1, 1, 1 };
8559
8560 std::vector<float> input0({ 1, 2, 3, 4 });
8561 std::vector<float> input1({ 10 });
8562 std::vector<float> output({ -9, -8, -7, -6 });
8563
Sadik Armagan2999a022019-04-09 14:20:12 +01008564 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8565 memoryManager,
8566 shape0, input0, 1.0f, 0,
8567 shape1, input1, 1.0f, 0,
8568 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008569}
8570
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008571LayerTestResult<float, 4> SubtractionBroadcastTest(
8572 armnn::IWorkloadFactory& workloadFactory,
8573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008574{
8575 const unsigned int shape0[] = { 1, 1, 2, 2 };
8576 const unsigned int shape1[] = { 1, 1, 1, 2 };
8577
8578 std::vector<float> input0({ 1, 2, 3, 4 });
8579 std::vector<float> input1({ 10, -5 });
8580 std::vector<float> output({ -9, 7, -7, 9 });
8581
Sadik Armagan2999a022019-04-09 14:20:12 +01008582 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8583 memoryManager,
8584 shape0, input0, 1.0f, 0,
8585 shape1, input1, 1.0f, 0,
8586 shape0, output, 1.0f, 0);
8587}
8588
8589LayerTestResult<int16_t, 4> SubtractionInt16Test(
8590 armnn::IWorkloadFactory& workloadFactory,
8591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8592{
8593 const unsigned int shape0[] = { 1, 1, 2, 2 };
8594 const unsigned int shape1[] = { 1, 1, 2, 2 };
8595
8596 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8597 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8598 std::vector<int16_t> output({ 3, 3, 5, 5 });
8599
8600 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8601 memoryManager,
8602 shape0, input0, 0.5f, 0,
8603 shape1, input1, 1.0f, 0,
8604 shape0, output, 1.0f, 0);
8605}
8606
8607LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8608 armnn::IWorkloadFactory& workloadFactory,
8609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8610{
8611 const unsigned int shape0[] = { 1, 1, 2, 2 };
8612 const unsigned int shape1[] = { 1, 1, 1, 1 };
8613
8614 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8615 std::vector<int16_t> input1({ 2 });
8616 std::vector<int16_t> output({ 3, 4, 5, 6 });
8617
8618 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8619 memoryManager,
8620 shape0, input0, 0.5f, 0,
8621 shape1, input1, 1.0f, 0,
8622 shape0, output, 1.0f, 0);
8623}
8624
8625LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8626 armnn::IWorkloadFactory& workloadFactory,
8627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8628{
8629 const unsigned int shape0[] = { 1, 1, 2, 2 };
8630 const unsigned int shape1[] = { 1, 1, 2, 1 };
8631
8632 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8633 std::vector<int16_t> input1({ 2, 1 });
8634 std::vector<int16_t> output({ 8, 11, 12, 15 });
8635
8636 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8637 memoryManager,
8638 shape0, input0, 1.0f, 0,
8639 shape1, input1, 1.0f, 0,
8640 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008641}
8642
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008643LayerTestResult<float, 4> BatchNormTest(
8644 armnn::IWorkloadFactory& workloadFactory,
8645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008646{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008647 // BatchSize: 1
8648 // Channels: 2
8649 // Height: 3
8650 // Width: 2
8651
8652 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8653 std::vector<float> inputValues
8654 {
8655 // Batch 0, Channel 0, Height (3) x Width (2)
8656 1.f, 4.f,
8657 4.f, 2.f,
8658 1.f, 6.f,
8659
8660 // Batch 0, Channel 1, Height (3) x Width (2)
8661 1.f, 1.f,
8662 4.f, 1.f,
8663 -2.f, 4.f
8664 };
8665 std::vector<float> expectedOutputValues
8666 {
8667 // Batch 0, Channel 0, Height (3) x Width (2)
8668 1.f, 4.f,
8669 4.f, 2.f,
8670 1.f, 6.f,
8671
8672 // Batch 0, Channel 1, Height (3) x Width (2)
8673 3.f, 3.f,
8674 4.f, 3.f,
8675 2.f, 4.f
8676 };
8677
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008678 return BatchNormTestImpl<armnn::DataType::Float32>(
8679 workloadFactory, memoryManager,
8680 inputOutputShape, inputValues, expectedOutputValues,
8681 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008682}
8683
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008684LayerTestResult<float, 4> BatchNormNhwcTest(
8685 armnn::IWorkloadFactory& workloadFactory,
8686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008687{
8688 // BatchSize: 1
8689 // Height: 3
8690 // Width: 2
8691 // Channels: 2
8692
8693 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8694 std::vector<float> inputValues
8695 {
8696 // Batch 0, Height 0, Width (2) x Channel (2)
8697 1.f, 1.f,
8698 4.f, 1.f,
8699
8700 // Batch 0, Height 1, Width (2) x Channel (2)
8701 4.f, 4.f,
8702 2.f, 1.f,
8703
8704 // Batch 0, Height 2, Width (2) x Channel (2)
8705 1.f, -2.f,
8706 6.f, 4.f
8707 };
8708 std::vector<float> expectedOutputValues
8709 {
8710 // Batch 0, Height 0, Width (2) x Channel (2)
8711 1.f, 3.f,
8712 4.f, 3.f,
8713
8714 // Batch 0, Height 1, Width (2) x Channel (2)
8715 4.f, 4.f,
8716 2.f, 3.f,
8717
8718 // Batch 0, Height 2, Width (2) x Channel (2)
8719 1.f, 2.f,
8720 6.f, 4.f
8721 };
8722
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008723 return BatchNormTestImpl<armnn::DataType::Float32>(
8724 workloadFactory, memoryManager,
8725 inputOutputShape, inputValues, expectedOutputValues,
8726 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008727}
8728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008729LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8730 armnn::IWorkloadFactory& workloadFactory,
8731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008732{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008733 // BatchSize: 1
8734 // Channels: 2
8735 // Height: 3
8736 // Width: 2
8737
8738 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8739 std::vector<float> inputValues
8740 {
8741 // Batch 0, Channel 0, Height (3) x Width (2)
8742 1.f, 4.f,
8743 4.f, 2.f,
8744 1.f, 6.f,
8745
8746 // Batch 0, Channel 1, Height (3) x Width (2)
8747 1.f, 1.f,
8748 4.f, 1.f,
8749 -2.f, 4.f
8750 };
8751 std::vector<float> expectedOutputValues
8752 {
8753 // Batch 0, Channel 0, Height (3) x Width (2)
8754 1.f, 4.f,
8755 4.f, 2.f,
8756 1.f, 6.f,
8757
8758 // Batch 0, Channel 1, Height (3) x Width (2)
8759 3.f, 3.f,
8760 4.f, 3.f,
8761 2.f, 4.f
8762 };
8763
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008764 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8765 workloadFactory, memoryManager,
8766 inputOutputShape, inputValues, expectedOutputValues,
8767 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008768}
8769
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008770LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8771 armnn::IWorkloadFactory& workloadFactory,
8772 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008773{
8774 // BatchSize: 1
8775 // Height: 3
8776 // Width: 2
8777 // Channels: 2
8778
8779 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8780 std::vector<float> inputValues
8781 {
8782 // Batch 0, Height 0, Width (2) x Channel (2)
8783 1.f, 1.f,
8784 4.f, 1.f,
8785
8786 // Batch 0, Height 1, Width (2) x Channel (2)
8787 4.f, 4.f,
8788 2.f, 1.f,
8789
8790 // Batch 0, Height 2, Width (2) x Channel (2)
8791 1.f, -2.f,
8792 6.f, 4.f
8793 };
8794 std::vector<float> expectedOutputValues
8795 {
8796 // Batch 0, Height 0, Width (2) x Channel (2)
8797 1.f, 3.f,
8798 4.f, 3.f,
8799
8800 // Batch 0, Height 1, Width (2) x Channel (2)
8801 4.f, 4.f,
8802 2.f, 3.f,
8803
8804 // Batch 0, Height 2, Width (2) x Channel (2)
8805 1.f, 2.f,
8806 6.f, 4.f
8807 };
8808
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008809 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8810 (workloadFactory, memoryManager,
8811 inputOutputShape, inputValues, expectedOutputValues,
8812 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008813}
8814
Matteo Martincighf5507132019-06-04 10:59:47 +01008815LayerTestResult<int16_t, 4> BatchNormInt16Test(
8816 armnn::IWorkloadFactory& workloadFactory,
8817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8818{
8819 // BatchSize: 1
8820 // Channels: 2
8821 // Height: 3
8822 // Width: 2
8823
8824 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8825 std::vector<float> inputValues
8826 {
8827 // Batch 0, Channel 0, Height (3) x Width (2)
8828 1.f, 4.f,
8829 4.f, 2.f,
8830 1.f, 6.f,
8831
8832 // Batch 0, Channel 1, Height (3) x Width (2)
8833 1.f, 1.f,
8834 4.f, 1.f,
8835 -2.f, 4.f
8836 };
8837 std::vector<float> expectedOutputValues
8838 {
8839 // Batch 0, Channel 0, Height (3) x Width (2)
8840 1.f, 4.f,
8841 4.f, 2.f,
8842 1.f, 6.f,
8843
8844 // Batch 0, Channel 1, Height (3) x Width (2)
8845 3.f, 3.f,
8846 4.f, 3.f,
8847 2.f, 4.f
8848 };
8849
8850 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8851 workloadFactory, memoryManager,
8852 inputOutputShape, inputValues, expectedOutputValues,
8853 1.f/20.f, 50, armnn::DataLayout::NCHW);
8854}
8855
8856LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
8857 armnn::IWorkloadFactory& workloadFactory,
8858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8859{
8860 // BatchSize: 1
8861 // Height: 3
8862 // Width: 2
8863 // Channels: 2
8864
8865 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8866 std::vector<float> inputValues
8867 {
8868 // Batch 0, Height 0, Width (2) x Channel (2)
8869 1.f, 1.f,
8870 4.f, 1.f,
8871
8872 // Batch 0, Height 1, Width (2) x Channel (2)
8873 4.f, 4.f,
8874 2.f, 1.f,
8875
8876 // Batch 0, Height 2, Width (2) x Channel (2)
8877 1.f, -2.f,
8878 6.f, 4.f
8879 };
8880 std::vector<float> expectedOutputValues
8881 {
8882 // Batch 0, Height 0, Width (2) x Channel (2)
8883 1.f, 3.f,
8884 4.f, 3.f,
8885
8886 // Batch 0, Height 1, Width (2) x Channel (2)
8887 4.f, 4.f,
8888 2.f, 3.f,
8889
8890 // Batch 0, Height 2, Width (2) x Channel (2)
8891 1.f, 2.f,
8892 6.f, 4.f
8893 };
8894
8895 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
8896 (workloadFactory, memoryManager,
8897 inputOutputShape, inputValues, expectedOutputValues,
8898 1.f/20.f, 50, armnn::DataLayout::NHWC);
8899}
8900
Nina Drozd58ef2c62019-05-16 12:09:18 +01008901LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008902 armnn::IWorkloadFactory& workloadFactory,
8903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008905 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008906}
8907
Nina Drozd58ef2c62019-05-16 12:09:18 +01008908LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8909 armnn::IWorkloadFactory& workloadFactory,
8910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8911{
8912 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8913}
8914
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008915LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8916 armnn::IWorkloadFactory& workloadFactory,
8917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008918{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008919 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008920}
8921
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008922LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8923 armnn::IWorkloadFactory& workloadFactory,
8924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008925{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008926 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008927}
8928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008929LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8930 armnn::IWorkloadFactory& workloadFactory,
8931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008932{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008933 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008934}
8935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008936LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8937 armnn::IWorkloadFactory& workloadFactory,
8938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008939{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008940 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8941 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008942}
8943
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008944LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8945 armnn::IWorkloadFactory& workloadFactory,
8946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008947{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008948 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8949 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008950}
8951
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008952LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8953 armnn::IWorkloadFactory& workloadFactory,
8954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008955{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008956 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008957}
8958
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008959LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8960 armnn::IWorkloadFactory& workloadFactory,
8961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008962{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008963 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008964}
8965
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008966LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8967 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8969 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008970{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008971 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8972 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008973}
8974
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008975LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8976 armnn::IWorkloadFactory& workloadFactory,
8977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008978{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008979 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008980}
8981
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008982LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8983 armnn::IWorkloadFactory& workloadFactory,
8984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008985{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008986 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8987 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008988}
8989
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008990LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8991 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8993 bool useSubtensor)
8994{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008995 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8996 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008997}
8998
8999LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9000 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009002{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009003 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009004}
9005
9006LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9007 armnn::IWorkloadFactory& workloadFactory,
9008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009010 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009011}
9012
9013LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9014 armnn::IWorkloadFactory& workloadFactory,
9015 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9016{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009017 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009018}
9019
9020LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9021 armnn::IWorkloadFactory& workloadFactory,
9022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
9023{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009024 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9025 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00009026}
9027
9028LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9029 armnn::IWorkloadFactory& workloadFactory,
9030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9031{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009032 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9033 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009034}
9035
9036LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9037 armnn::IWorkloadFactory& workloadFactory,
9038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9039{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009040 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9041 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009042}
9043
9044LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9045 armnn::IWorkloadFactory& workloadFactory,
9046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9047{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009048 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9049 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009050}
9051
9052LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9053 armnn::IWorkloadFactory& workloadFactory,
9054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9055 bool useSubtensor)
9056{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009057 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9058 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00009059}
9060
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009061LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9062 armnn::IWorkloadFactory& workloadFactory,
9063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9064 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009065{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009066 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9067 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009068}
9069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009070LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9071 armnn::IWorkloadFactory& workloadFactory,
9072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9073 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009074{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009075 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009076 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00009077}
9078
Teresa Charlin0434df62019-06-06 13:40:35 +01009079LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9080 armnn::IWorkloadFactory& workloadFactory,
9081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9082 bool forceNoPadding)
9083{
9084 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9085 workloadFactory, memoryManager, forceNoPadding);
9086}
9087
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009088LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9089 armnn::IWorkloadFactory& workloadFactory,
9090 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9091 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009092{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009093 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9094 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009095}
9096
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009097LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9098 armnn::IWorkloadFactory& workloadFactory,
9099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9100 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009101{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009102 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009103 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009104}
9105
Teresa Charlin0434df62019-06-06 13:40:35 +01009106LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9107 armnn::IWorkloadFactory& workloadFactory,
9108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9109 bool forceNoPadding)
9110{
9111 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9112 workloadFactory, memoryManager, forceNoPadding);
9113}
9114
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009115LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9116 armnn::IWorkloadFactory& workloadFactory,
9117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009118 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009119{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009120 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009121}
9122
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009123LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9124 armnn::IWorkloadFactory& workloadFactory,
9125 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009126 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01009127{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009128 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01009129}
9130
Teresa Charlin0434df62019-06-06 13:40:35 +01009131LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9132 armnn::IWorkloadFactory& workloadFactory,
9133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9134 const armnn::DataLayout dataLayout)
9135{
9136 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9137}
9138LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9139 armnn::IWorkloadFactory& workloadFactory,
9140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9141{
9142 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9143}
9144
9145LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9146 armnn::IWorkloadFactory& workloadFactory,
9147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9148{
9149 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9150 workloadFactory, memoryManager, 1.0f, -5);
9151}
9152
9153LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9154 armnn::IWorkloadFactory& workloadFactory,
9155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9156{
9157 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9158 workloadFactory, memoryManager);
9159}
9160
9161LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9162 armnn::IWorkloadFactory& workloadFactory,
9163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9164{
9165 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9166}
9167
9168LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9169 armnn::IWorkloadFactory& workloadFactory,
9170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9171{
9172 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9173 workloadFactory, memoryManager, 1.0f, -5);
9174}
9175
9176LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9177 armnn::IWorkloadFactory& workloadFactory,
9178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9179{
9180 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9181 workloadFactory, memoryManager);
9182}
9183
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009184LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9185 armnn::IWorkloadFactory& workloadFactory,
9186 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009187 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009188{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009189 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01009190}
9191
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009192LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9193 armnn::IWorkloadFactory& workloadFactory,
9194 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009195 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01009196{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009197 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009198 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009199}
9200
Teresa Charlin0434df62019-06-06 13:40:35 +01009201LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9202 armnn::IWorkloadFactory& workloadFactory,
9203 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9204 const armnn::DataLayout dataLayout)
9205{
9206 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9207 workloadFactory, memoryManager, dataLayout);
9208}
9209
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009210LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9211 armnn::IWorkloadFactory& workloadFactory,
9212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9213 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01009214{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009215 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009216 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01009217}
9218
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009219LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9220 armnn::IWorkloadFactory& workloadFactory,
9221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009222{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009223 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009224}
9225
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009226LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9227 armnn::IWorkloadFactory& workloadFactory,
9228 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009229{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009230 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9231 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009232}
9233
Teresa Charlin0434df62019-06-06 13:40:35 +01009234LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9235 armnn::IWorkloadFactory& workloadFactory,
9236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9237{
9238 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9239 workloadFactory, memoryManager);
9240}
9241LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9242 armnn::IWorkloadFactory& workloadFactory,
9243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9244{
9245 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9246}
9247
9248LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9249 armnn::IWorkloadFactory& workloadFactory,
9250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9251{
9252 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9253 workloadFactory, memoryManager);
9254}
9255
9256LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9257 armnn::IWorkloadFactory& workloadFactory,
9258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9259{
9260 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9261 workloadFactory, memoryManager);
9262}
9263
9264LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9265 armnn::IWorkloadFactory& workloadFactory,
9266 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9267{
9268 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9269 workloadFactory, memoryManager);
9270}
9271
9272LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9273 armnn::IWorkloadFactory& workloadFactory,
9274 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9275{
9276 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9277 workloadFactory, memoryManager);
9278}
9279
9280LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9281 armnn::IWorkloadFactory& workloadFactory,
9282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9283{
9284 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9285 workloadFactory, memoryManager);
9286}
9287
9288LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9289 armnn::IWorkloadFactory& workloadFactory,
9290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9291{
9292 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9293}
9294
9295LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9296 armnn::IWorkloadFactory& workloadFactory,
9297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9298{
9299 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9300 workloadFactory, memoryManager);
9301}
9302
9303LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9304 armnn::IWorkloadFactory& workloadFactory,
9305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9306{
9307 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9308 workloadFactory, memoryManager);
9309}
9310
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009311LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9312 armnn::IWorkloadFactory& workloadFactory,
9313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009314 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009315{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009316 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009317}
9318
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009319LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9320 armnn::IWorkloadFactory& workloadFactory,
9321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009322 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009323{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009324 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009325}
9326
Teresa Charlin0434df62019-06-06 13:40:35 +01009327LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9328 armnn::IWorkloadFactory& workloadFactory,
9329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9330 const armnn::DataLayout dataLayout)
9331{
9332 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9333}
9334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009335LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9336 armnn::IWorkloadFactory& workloadFactory,
9337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009338{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009339 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009340}
9341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009342LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9343 armnn::IWorkloadFactory& workloadFactory,
9344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009345{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009346 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009347}
9348
Teresa Charlin0434df62019-06-06 13:40:35 +01009349LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9350 armnn::IWorkloadFactory& workloadFactory,
9351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9352{
9353 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9354}
9355
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009356LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9357 armnn::IWorkloadFactory& workloadFactory,
9358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009359{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009360 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009361}
9362
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009363LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9364 armnn::IWorkloadFactory& workloadFactory,
9365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009366{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009367 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009368}
9369
Teresa Charlin0434df62019-06-06 13:40:35 +01009370LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9371 armnn::IWorkloadFactory& workloadFactory,
9372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9373{
9374 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9375}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009376LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9377 armnn::IWorkloadFactory& workloadFactory,
9378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009380 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009381}
9382
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009383LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9384 armnn::IWorkloadFactory& workloadFactory,
9385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009386{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009387 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009388}
9389
Teresa Charlin0434df62019-06-06 13:40:35 +01009390LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9391 armnn::IWorkloadFactory& workloadFactory,
9392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9393{
9394 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9395}
9396
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009397LayerTestResult<float, 4> L2Pooling2dSize7Test(
9398 armnn::IWorkloadFactory& workloadFactory,
9399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009401 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009402}
9403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009404LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9405 armnn::IWorkloadFactory& workloadFactory,
9406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009408 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009409}
9410
Teresa Charlin0434df62019-06-06 13:40:35 +01009411LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9412 armnn::IWorkloadFactory& workloadFactory,
9413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9414{
9415 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9416}
9417
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009418LayerTestResult<float, 4> L2Pooling2dSize9Test(
9419 armnn::IWorkloadFactory& workloadFactory,
9420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009421{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009422 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009423}
9424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009425LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9426 armnn::IWorkloadFactory& workloadFactory,
9427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009428{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009429 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009430}
9431
Teresa Charlin0434df62019-06-06 13:40:35 +01009432LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9433 armnn::IWorkloadFactory& workloadFactory,
9434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9435{
9436 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9437}
9438LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9439 armnn::IWorkloadFactory& workloadFactory,
9440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9441{
9442 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9443}
9444
9445LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9446 armnn::IWorkloadFactory& workloadFactory,
9447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9448{
9449 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9450}
9451
9452LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9453 armnn::IWorkloadFactory& workloadFactory,
9454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9455{
9456 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9457}
9458
9459LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9460 armnn::IWorkloadFactory& workloadFactory,
9461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9462{
9463 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9464}
9465
9466LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9467 armnn::IWorkloadFactory& workloadFactory,
9468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9469{
9470 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9471}
9472
9473LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9474 armnn::IWorkloadFactory& workloadFactory,
9475 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9476{
9477 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9478}
9479
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009480LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9481 armnn::IWorkloadFactory& workloadFactory,
9482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009483{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009484 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009485}
9486
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009487LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9488 armnn::IWorkloadFactory& workloadFactory,
9489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009490{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009491 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009492}
9493
Teresa Charlin0434df62019-06-06 13:40:35 +01009494LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9495 armnn::IWorkloadFactory& workloadFactory,
9496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9497{
9498 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9499}
9500
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009501LayerTestResult<float, 4> ComparePooling2dTest(
9502 armnn::IWorkloadFactory& workloadFactory,
9503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9504 armnn::IWorkloadFactory& refWorkloadFactory,
9505 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009506{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009507 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009508 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009509}
9510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009511LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9512 armnn::IWorkloadFactory& workloadFactory,
9513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9514 armnn::IWorkloadFactory& refWorkloadFactory,
9515 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009517 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009518 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009519}
9520
Teresa Charlin0434df62019-06-06 13:40:35 +01009521LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9522 armnn::IWorkloadFactory& workloadFactory,
9523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9524 armnn::IWorkloadFactory& refWorkloadFactory,
9525 armnn::PoolingAlgorithm poolingType)
9526{
9527 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9528 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9529}
9530
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009531LayerTestResult<float, 2> FullyConnectedLargeTest(
9532 armnn::IWorkloadFactory& workloadFactory,
9533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9534 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009535{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009536 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009537}
9538
// Builds and executes two chained workloads by hand: a 1x1 MaxPool with stride 2x2
// over a 3x3 input, whose output tensor handle is then fed as the first input of an
// Addition workload. Returns the LayerTestResult carrying both the actual and the
// expected 2x2 sum.
// NOTE(review): memoryManager is accepted for signature consistency with the other
// tests in this file but is never used by this function's body.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    // Pooling maps NCHW 1x1x3x3 -> 1x1x2x2 (1x1 pool window sampled at stride 2).
    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Host-side staging buffer shaped like the pooling output.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // The pooling output handle is wired directly as the first addition input, so the
    // two workloads share the intermediate tensor without an extra host round trip.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this copy-out/copy-in round trip of the pooling output happens
    // BEFORE workload->Execute(), so it shuttles not-yet-computed data and looks
    // redundant — the pooling workload overwrites the handle below. Confirm before
    // removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute pooling first so its output is ready when the addition workload runs.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009643
//
// SpaceToBatchNd layer tests.
//
// Each function below is a thin wrapper that instantiates the templated
// SpaceToBatchNd*Test implementation (see SpaceToBatchNdTestImpl.hpp) for a
// concrete armnn::DataType. The *NHWC* variants exercise the NHWC data
// layout; the other variants use the implementation's default layout.
//

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Quantised asymmetric 8-bit (uint8) variants.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// NHWC data-layout variants.
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009755
// SpaceToBatchNd wrappers for the quantised symmetric 16-bit data type.
//
// NOTE(review): these are named "*Uint16*" but actually return int16_t tensors
// and instantiate armnn::DataType::QuantisedSymm16 (a signed type). The names
// are kept as-is because external callers reference them; consider renaming to
// "*Int16*" in a follow-up that updates all call sites.

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9811
//
// SpaceToDepth layer tests.
//
// Thin wrappers around the templated SpaceToDepthSimpleTest1/SpaceToDepthSimpleTest2
// implementations (see SpaceToDepthTestImpl.hpp). The NHWC variants rely on the
// implementation's default layout; the NCHW variants pass
// armnn::DataLayout::NCHW explicitly.
//

LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}
9887
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009888namespace {
9889
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009890} // anonymous namespace
9891
//
// StridedSlice layer tests.
//
// Thin wrappers instantiating the templated StridedSlice* implementations for
// a concrete armnn::DataType. The result rank (4/3/2) matches the slice shape
// each underlying test produces.
//

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Shrink-axis-mask drops dimensions, hence the rank-2 result.
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Quantised asymmetric 8-bit (uint8) variants.
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010017
// StridedSlice wrappers for the quantised symmetric 16-bit (int16) data type.
// These are consistently named "*Int16*", matching the int16_t result type.

LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
10080
//
// Debug layer tests.
//
// Thin wrappers instantiating the templated Debug{1..4}DTest implementations
// for Float32 and QuantisedAsymm8, covering tensor ranks 1 through 4.
//

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +000010136
//
// Gather layer tests.
//
// Thin wrappers around the templated Gather*TestImpl implementations
// (see GatherTestImpl.hpp) for Float32, QuantisedAsymm8 and QuantisedSymm16.
//

LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
10201
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010202LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010203 armnn::IWorkloadFactory& workloadFactory,
10204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10205{
10206 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10207}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010208
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010209LayerTestResult<float, 4> DequantizeOffsetUint8Test(
10210 armnn::IWorkloadFactory& workloadFactory,
10211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10212{
10213 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10214}
10215
10216LayerTestResult<float, 4> DequantizeSimpleInt16Test(
10217 armnn::IWorkloadFactory& workloadFactory,
10218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10219{
10220 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10221}
10222
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010223LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10224 armnn::IWorkloadFactory& workloadFactory,
10225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10226{
10227 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10228}
10229
10230LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10231 armnn::IWorkloadFactory& workloadFactory,
10232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10233{
10234 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10235}
10236
10237LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10238 armnn::IWorkloadFactory& workloadFactory,
10239 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10240{
10241 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10242}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010010243
10244//
10245// TransposeConvolution2d
10246//
10247
10248// Simple biased
10249LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
10250 armnn::IWorkloadFactory& workloadFactory,
10251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10252{
10253 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10254 workloadFactory,
10255 memoryManager,
10256 true,
10257 armnn::DataLayout::NCHW);
10258}
10259
10260LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
10261 armnn::IWorkloadFactory& workloadFactory,
10262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10263{
10264 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10265 workloadFactory,
10266 memoryManager,
10267 true,
10268 armnn::DataLayout::NHWC);
10269}
10270
10271LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
10272 armnn::IWorkloadFactory& workloadFactory,
10273 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10274{
10275 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10276 workloadFactory,
10277 memoryManager,
10278 true,
10279 armnn::DataLayout::NCHW);
10280}
10281
10282LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
10283 armnn::IWorkloadFactory& workloadFactory,
10284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10285{
10286 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10287 workloadFactory,
10288 memoryManager,
10289 true,
10290 armnn::DataLayout::NHWC);
10291}
10292
10293LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
10294 armnn::IWorkloadFactory& workloadFactory,
10295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10296{
10297 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10298 workloadFactory,
10299 memoryManager,
10300 true,
10301 armnn::DataLayout::NCHW);
10302}
10303
10304LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
10305 armnn::IWorkloadFactory& workloadFactory,
10306 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10307{
10308 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10309 workloadFactory,
10310 memoryManager,
10311 true,
10312 armnn::DataLayout::NHWC);
10313}
10314
10315// Simple unbiased
10316LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
10317 armnn::IWorkloadFactory& workloadFactory,
10318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10319{
10320 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10321 workloadFactory,
10322 memoryManager,
10323 false,
10324 armnn::DataLayout::NCHW);
10325}
10326
10327LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
10328 armnn::IWorkloadFactory& workloadFactory,
10329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10330{
10331 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10332 workloadFactory,
10333 memoryManager,
10334 false,
10335 armnn::DataLayout::NHWC);
10336}
10337
10338LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
10339 armnn::IWorkloadFactory& workloadFactory,
10340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10341{
10342 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10343 workloadFactory,
10344 memoryManager,
10345 false,
10346 armnn::DataLayout::NCHW);
10347}
10348
10349LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
10350 armnn::IWorkloadFactory& workloadFactory,
10351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10352{
10353 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10354 workloadFactory,
10355 memoryManager,
10356 false,
10357 armnn::DataLayout::NHWC);
10358}
10359
10360LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
10361 armnn::IWorkloadFactory& workloadFactory,
10362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10363{
10364 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10365 workloadFactory,
10366 memoryManager,
10367 false,
10368 armnn::DataLayout::NCHW);
10369}
10370
10371LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
10372 armnn::IWorkloadFactory& workloadFactory,
10373 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10374{
10375 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10376 workloadFactory,
10377 memoryManager,
10378 false,
10379 armnn::DataLayout::NHWC);
10380}
10381
10382// Padded biased
10383LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
10384 armnn::IWorkloadFactory& workloadFactory,
10385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10386{
10387 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10388 workloadFactory,
10389 memoryManager,
10390 true,
10391 armnn::DataLayout::NCHW);
10392}
10393
10394LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10395 armnn::IWorkloadFactory& workloadFactory,
10396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10397{
10398 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10399 workloadFactory,
10400 memoryManager,
10401 true,
10402 armnn::DataLayout::NHWC);
10403}
10404
10405LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10406 armnn::IWorkloadFactory& workloadFactory,
10407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10408{
10409 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10410 workloadFactory,
10411 memoryManager,
10412 true,
10413 armnn::DataLayout::NCHW);
10414}
10415
10416LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10417 armnn::IWorkloadFactory& workloadFactory,
10418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10419{
10420 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10421 workloadFactory,
10422 memoryManager,
10423 true,
10424 armnn::DataLayout::NHWC);
10425}
10426
10427LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10428 armnn::IWorkloadFactory& workloadFactory,
10429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10430{
10431 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10432 workloadFactory,
10433 memoryManager,
10434 true,
10435 armnn::DataLayout::NCHW);
10436}
10437
10438LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10439 armnn::IWorkloadFactory& workloadFactory,
10440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10441{
10442 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10443 workloadFactory,
10444 memoryManager,
10445 true,
10446 armnn::DataLayout::NHWC);
10447}
10448
10449// Padded unbiased
10450LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10451 armnn::IWorkloadFactory& workloadFactory,
10452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10453{
10454 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10455 workloadFactory,
10456 memoryManager,
10457 false,
10458 armnn::DataLayout::NCHW);
10459}
10460
10461LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10462 armnn::IWorkloadFactory& workloadFactory,
10463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10464{
10465 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10466 workloadFactory,
10467 memoryManager,
10468 false,
10469 armnn::DataLayout::NHWC);
10470}
10471
10472LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10473 armnn::IWorkloadFactory& workloadFactory,
10474 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10475{
10476 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10477 workloadFactory,
10478 memoryManager,
10479 false,
10480 armnn::DataLayout::NCHW);
10481}
10482
10483LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10484 armnn::IWorkloadFactory& workloadFactory,
10485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10486{
10487 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10488 workloadFactory,
10489 memoryManager,
10490 false,
10491 armnn::DataLayout::NHWC);
10492}
10493
10494LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10495 armnn::IWorkloadFactory& workloadFactory,
10496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10497{
10498 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10499 workloadFactory,
10500 memoryManager,
10501 false,
10502 armnn::DataLayout::NCHW);
10503}
10504
10505LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10506 armnn::IWorkloadFactory& workloadFactory,
10507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10508{
10509 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10510 workloadFactory,
10511 memoryManager,
10512 false,
10513 armnn::DataLayout::NHWC);
10514}
10515
10516// Strided biased
10517LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10518 armnn::IWorkloadFactory& workloadFactory,
10519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10520{
10521 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10522 workloadFactory,
10523 memoryManager,
10524 true,
10525 armnn::DataLayout::NCHW);
10526}
10527
10528LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10529 armnn::IWorkloadFactory& workloadFactory,
10530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10531{
10532 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10533 workloadFactory,
10534 memoryManager,
10535 true,
10536 armnn::DataLayout::NHWC);
10537}
10538
10539LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10540 armnn::IWorkloadFactory& workloadFactory,
10541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10542{
10543 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10544 workloadFactory,
10545 memoryManager,
10546 true,
10547 armnn::DataLayout::NCHW);
10548}
10549
10550LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10551 armnn::IWorkloadFactory& workloadFactory,
10552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10553{
10554 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10555 workloadFactory,
10556 memoryManager,
10557 true,
10558 armnn::DataLayout::NHWC);
10559}
10560
10561LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10562 armnn::IWorkloadFactory& workloadFactory,
10563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10564{
10565 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10566 workloadFactory,
10567 memoryManager,
10568 true,
10569 armnn::DataLayout::NCHW);
10570}
10571
10572LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10573 armnn::IWorkloadFactory& workloadFactory,
10574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10575{
10576 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10577 workloadFactory,
10578 memoryManager,
10579 true,
10580 armnn::DataLayout::NHWC);
10581}
10582
10583// Strided unbiased
10584LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10585 armnn::IWorkloadFactory& workloadFactory,
10586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10587{
10588 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10589 workloadFactory,
10590 memoryManager,
10591 false,
10592 armnn::DataLayout::NCHW);
10593}
10594
10595LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10596 armnn::IWorkloadFactory& workloadFactory,
10597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10598{
10599 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10600 workloadFactory,
10601 memoryManager,
10602 false,
10603 armnn::DataLayout::NHWC);
10604}
10605
10606LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10607 armnn::IWorkloadFactory& workloadFactory,
10608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10609{
10610 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10611 workloadFactory,
10612 memoryManager,
10613 false,
10614 armnn::DataLayout::NCHW);
10615}
10616
10617LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10618 armnn::IWorkloadFactory& workloadFactory,
10619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10620{
10621 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10622 workloadFactory,
10623 memoryManager,
10624 false,
10625 armnn::DataLayout::NHWC);
10626}
10627
10628LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10629 armnn::IWorkloadFactory& workloadFactory,
10630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10631{
10632 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10633 workloadFactory,
10634 memoryManager,
10635 false,
10636 armnn::DataLayout::NCHW);
10637}
10638
10639LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10640 armnn::IWorkloadFactory& workloadFactory,
10641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10642{
10643 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10644 workloadFactory,
10645 memoryManager,
10646 false,
10647 armnn::DataLayout::NHWC);
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +010010648}