blob: 2201499b3a635e0d7e8fbb1f45242d620e27d9dd [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Channel 0: rows of 0.5 with the second row all zero.
// Channel 1: all zero except a vertical stripe of 1s in column 2.
// Channel 2: all -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});

// 2-channel bias used by a number of Conv2d tests (one value per output channel).
static std::vector<float> Bias2({0, 2});
79
Francis Murtagh07f21212019-07-23 09:50:50 +010080struct Simple3dSoftmaxOutputData
81{
82 const std::vector<float> outputData =
83 {
84 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
85 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f
86 };
87
88 const armnn::TensorShape inputShape{ 1, 8, 1 };
89
90 const std::vector<float> inputData =
91 {
92 0.f, 1.f, 0.f, 0.f,
93 .5f, 0.f, 0.f, 0.f,
94 };
95};
96
97struct Simple4dSoftmaxData
98{
99 const armnn::TensorShape inputShape{ 1, 8, 1, 1 };
100
101 const std::vector<float> outputData = { 0.0964599f, 0.26220518f, 0.0964599f, 0.0964599f,
102 0.15903549f, 0.0964599f, 0.0964599f, 0.0964599f };
103 const std::vector<float> inputData =
104 {
105 0.f, 1.f, 0.f, 0.f,
106 .5f, 0.f, 0.f, 0.f
107 };
108};
109
telsoa01c577f2c2018-08-31 09:22:23 +0100110// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000111template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +0100112boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +0000113{
114 if(biasEnabled)
115 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000116 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +0100117 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +0000118 return bias;
119 }
120 else
121 {
122 return boost::multi_array<T, 1>();
123 }
124}
125
// Convolves the shared single-batch 3-channel 16x8 image (ConvInput3x8x16)
// with a 2-element batch of 3-channel 3x5 kernels and checks the result
// against precomputed expected values.
// qScale/qOffset quantise input, kernel and output; the bias (when enabled)
// is quantised with qScale * qScale, the usual convolution bias convention.
// `layout` selects NCHW/NHWC handling inside SimpleConvolution2dTestImpl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Kernel 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 4x14 image (one channel per kernel).
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
209
// Same scenario as SimpleConvolution2d3x5TestCommon but with 3x3 kernels,
// which exercises ArmCompute's direct convolution path.
// qScale/qOffset quantise input, kernel and output; the bias (when enabled)
// is quantised with qScale * qScale.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Kernel 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Kernel 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
286
// NHWC convolution of a single-batch single-channel 3x4 image with one 3x3
// kernel, no bias. NOTE(review): `biasEnabled` is accepted but never used —
// the impl is always called with an empty bias; confirm this is intentional.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch, single-channel 3x4 image in NHWC order ({1, 3, 4, 1}).
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });


    // A single single-channel 3x3 kernel ({1, 3, 3, 1} in NHWC order).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a single-channel 3x4 image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Empty bias tensor: bias is disabled for this test regardless of biasEnabled.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
338
// NHWC convolution of a single-batch single-channel 5x5 image with a 3x3
// kernel, stride 2x2 and symmetric padding of 1, no bias.
// NOTE(review): as in SimpleConvolution2d3x3NhwcTestCommon, `biasEnabled`
// is accepted but never used — confirm this is intentional.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch, 1 channel, 3x3 image
    // (5x5 input, pad 1, stride 2 -> 3x3).
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric padding of 1 on all sides, stride 2 in both dimensions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000409 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000410{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000411 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
412 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000413}
414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000415LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
416 armnn::IWorkloadFactory& workloadFactory,
417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
418 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000419 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000420{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000421 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
422 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000423}
424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000425LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
426 armnn::IWorkloadFactory& workloadFactory,
427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
428 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000429 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000430{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000431 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
432 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000433}
434
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000435LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
436 armnn::IWorkloadFactory& workloadFactory,
437 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
438 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
441 workloadFactory,
442 memoryManager,
443 0.f,
444 0,
445 biasEnabled,
446 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100447}
448
Mike Kelly7332ed82018-12-20 17:03:06 +0000449LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
450 armnn::IWorkloadFactory& workloadFactory,
451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
452 bool biasEnabled,
453 const armnn::DataLayout layout)
454{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000455 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
456 workloadFactory,
457 memoryManager,
458 0.f,
459 0,
460 biasEnabled,
461 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000462}
463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000464LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
465 armnn::IWorkloadFactory& workloadFactory,
466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
467 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000468 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000469{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000470 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
471 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000472}
473
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100474LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
475 armnn::IWorkloadFactory& workloadFactory,
476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
477 bool biasEnabled,
478 const armnn::DataLayout layout)
479{
480return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
481 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
482}
483
484LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
485 armnn::IWorkloadFactory& workloadFactory,
486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
487 bool biasEnabled,
488 const armnn::DataLayout layout)
489{
490 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
491 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
492}
493
// Convolves a 1-channel 3x3 image with a 2x2 kernel using asymmetric padding
// (left 1, top 2, right 3, bottom 4) that is larger than half the kernel
// size in two directions; checks against manually calculated values.
// Bias is always disabled here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[.....  .....  .....  .....; .....  .....  .....  .....  ; .....  .....  .....  ..... ;  .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
557
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000558template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
559 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000560LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
561 armnn::IWorkloadFactory& workloadFactory,
562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000563 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000564 float qScale,
565 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000566{
telsoa01c577f2c2018-08-31 09:22:23 +0100567 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000568 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000569 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
570 QuantizedVector<T>(qScale, qOffset, {
571 11,21,31,41,51,
572 12,22,32,42,52,
573 13,23,33,43,53,
574 14,24,34,44,54,
575 15,25,35,45,55,
576 })));
577
telsoa01c577f2c2018-08-31 09:22:23 +0100578 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000579 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000580 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
581 QuantizedVector<T>(qScale, qOffset, {
582 -11,-21,-31,-41,
583 -12,-22,-32,-42,
584 -13,-23,-33,-43,
585 -14,-24,-34,-44,
586 })));
587
telsoa01c577f2c2018-08-31 09:22:23 +0100588 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000589 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000590 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
591 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
592 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000593 -7140, -10580, -13940, -9300, -5230,
594 -9590, -14120, -18520, -12290, -6860,
595 -9980, -14560, -18960, -12560, -7000,
596 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100597 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000598 })));
599
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000600 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
601 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000602 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000603 input,
604 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100605 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000606 expectedOutput,
607 qScale,
608 qOffset,
narpra015f703182018-10-26 16:24:58 +0100609 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100610 1, // Padding left.
611 1, // Padding top.
612 2, // Padding right.
613 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100614}
615
Teresa Charlinedeeb162019-06-14 11:09:19 +0100616LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
617 armnn::IWorkloadFactory& workloadFactory,
618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
619 armnn::DataLayout layout)
620{
621 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
622 workloadFactory, memoryManager, layout, 0.0f, 0);
623}
624
625LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
626 armnn::IWorkloadFactory& workloadFactory,
627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
628 armnn::DataLayout layout)
629{
630 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
631 <armnn::DataType::Float32, armnn::DataType::Float32>(
632 workloadFactory, memoryManager, layout, 0.0f, 0);
633}
634
635LayerTestResult<float, 4> Convolution1dTest(
636 armnn::IWorkloadFactory& workloadFactory,
637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
638 bool biasEnabled)
639{
640 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
641 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
642}
643
644LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
645 armnn::IWorkloadFactory& workloadFactory,
646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
647 bool biasEnabled)
648{
649 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
650 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
651}
652
653LayerTestResult<float,4> CompareConvolution2dTest(
654 armnn::IWorkloadFactory& workloadFactory,
655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
656 armnn::IWorkloadFactory& refWorkloadFactory)
657{
658 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
659 workloadFactory, memoryManager, refWorkloadFactory);
660}
661
// Generic driver for dilated-convolution tests: callers supply unquantised
// input/kernel/expected-output values plus their TensorInfos; this function
// picks quantisation parameters for the data type, quantises everything and
// forwards to SimpleConvolution2dTestImpl.
// The TensorInfo parameters are taken by non-const reference because their
// quantisation scale/offset are set here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    // Choose quantisation parameters per data type. The Float32/default case
    // uses scale 0 / offset 0, matching the convention used by the Float32
    // wrapper tests above.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Propagate the chosen quantisation parameters to all three tensor infos.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantise the caller-supplied float values into tensors of type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    // Bias (when enabled) uses qScale * qScale, the convolution bias convention.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
748
// Convolution of a 10x10 single-channel image with a 3x3 kernel dilated by 3.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, single-channel 10x10 input: all zeros except a 3x3 patch of ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 kernel with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,       // dilationX
        3,       // dilationY
        layout,
        biasEnabled);
}
804
// Same dilated 3x3 convolution as Convolution2d3x3Dilation3x3Test, but with a
// two-channel input; both channels feed a single output channel, so each
// expected value is the sum over the two (identical) channel contributions.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single batch, two identical 10x10 channels, each with a 3x3 patch of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel, two input channels: kernel shape [1, 2, 3, 3].
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    // Values are double those of the single-channel test (two channels summed).
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
         6.,  4.,  4.,  4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,       // dilationX
        3,       // dilationY
        layout,
        biasEnabled);
}
875
// Dilated 2x2 convolution with padding and a 3x3 stride.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, single-channel 10x10 input of all ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    // One 2x2 kernel with weights 1..4.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
    // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
    // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
    // NOTE: "P = 2" above is the total padding per dimension — the code below
    // pads 1 on each side (padLeft/Top/Right/Bottom = 1), hence "Padding2x2"
    // in the test name.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4,  7,  7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2,  3,  3, 1
    };
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        2,       // dilationX
        2,       // dilationY
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        3,       // strideX
        3,       // strideY
        biasEnabled
        );
}
942
// Explicit template instantiations for the dilated Convolution2d tests,
// covering Float32 and the quantised QAsymm8/QSymm16 data types (quantised
// variants use Signed32 biases).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout);
1005
// Depthwise convolution with asymmetric padding (1/1 on the leading edges,
// 2/2 on the trailing edges) over a 2-channel 5x5 input.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
1086
// Same depthwise convolution as DepthwiseConvolution2dAsymmetricTestCommon,
// but executed with an NHWC data layout. Tensor shapes here are written in
// NCHW order; the NHWC handling is assumed to be done by
// DepthwiseConvolution2dTestImpl via the 'layout' argument — TODO confirm.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected values match the NCHW asymmetric-padding test above.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        1,  // Padding top.
        2,  // Padding right.
        2,  // Padding bottom.
        1,  // strideX
        1); // strideY
}
1164
// Depthwise convolution with a 3x3 kernel dilated by 3 over a 9x9 input,
// executed with an NHWC data layout.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    // Single-batch, single-channel 9x9 input: all zeros except a 3x3 patch of ones.
    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 1, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // One 3x3 kernel with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
             1, 2, 3,
             4, 5, 6,
             7, 8, 9
        })));

    // No padding, unit strides; only the dilation differs from the default.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX  = 1;
    uint32_t strideY  = 1;
    uint32_t dilationX  = 3;
    uint32_t dilationY  = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
             5, 5, 5,
             5, 5, 5,
             5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1238
Teresa Charlin20b1f882019-06-19 09:34:37 +01001239
1240template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1241LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1242 armnn::IWorkloadFactory& workloadFactory,
1243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1244 const std::vector<float>& inputNoQuantizedValues,
1245 armnn::TensorInfo& inputTensorInfo,
1246 const std::vector<float>& kernelNoQuantizedValues,
1247 armnn::TensorInfo& kernelTensorInfo,
1248 const std::vector<float>& outputExpectedNoQuantizedValues,
1249 armnn::TensorInfo& outputTensorInfo,
1250 uint32_t dilationX,
1251 uint32_t dilationY,
1252 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1253 bool biasEnabled = false)
1254{
1255 float qScale;
1256 int32_t qOffset;
1257 switch (ArmnnType)
1258 {
1259 case armnn::DataType::QuantisedAsymm8:
1260 {
1261 qScale = 0.1f;
1262 qOffset = 128;
1263 break;
1264 }
1265 case armnn::DataType::QuantisedSymm16:
1266 {
1267 qScale = 0.1f;
1268 qOffset = 0;
1269 break;
1270 }
1271 case armnn::DataType::Float32:
1272 default:
1273 {
1274 qScale = 0.f;
1275 qOffset = 0;
1276 break;
1277 }
1278 }
1279
1280 inputTensorInfo.SetQuantizationScale(qScale);
1281 inputTensorInfo.SetQuantizationOffset(qOffset);
1282 kernelTensorInfo.SetQuantizationScale(qScale);
1283 kernelTensorInfo.SetQuantizationOffset(qOffset);
1284 outputTensorInfo.SetQuantizationScale(qScale);
1285 outputTensorInfo.SetQuantizationOffset(qOffset);
1286
1287 auto input = MakeTensor<T, 4>(inputTensorInfo,
1288 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1289 inputTensorInfo.GetQuantizationOffset(),
1290 inputNoQuantizedValues)));
1291 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1292 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1293 kernelTensorInfo.GetQuantizationOffset(),
1294 kernelNoQuantizedValues)));
1295 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1296 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1297 outputTensorInfo.GetQuantizationOffset(),
1298 outputExpectedNoQuantizedValues)));
1299
1300 uint32_t padLeft = 0;
1301 uint32_t padTop = 0;
1302 uint32_t padRight = 0;
1303 uint32_t padBottom = 0;
1304 uint32_t strideX = 1;
1305 uint32_t strideY = 1;
1306
1307 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1308 workloadFactory,
1309 memoryManager,
1310 input,
1311 kernel,
1312 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1313 expectedOutput,
1314 qScale,
1315 qOffset,
1316 layout,
1317 padLeft,
1318 padTop,
1319 padRight,
1320 padBottom,
1321 strideX,
1322 strideY,
1323 dilationX,
1324 dilationY);
1325}
1326
// Depthwise convolution of a 10x10 single-channel image with a 3x3 kernel dilated by 3.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, single-channel 10x10 input: all zeros except a 3x3 patch of ones.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 kernel with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,       // dilationX
        3,       // dilationY
        layout,
        biasEnabled);
}
1382
// Two-channel variant of the dilated depthwise test. Unlike regular
// convolution, depthwise keeps one output channel per input channel, so the
// output is 2x4x4 and each channel matches the single-channel expected values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single batch, two identical 10x10 channels, each with a 3x3 patch of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 kernel per channel, both with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,       // dilationX
        3,       // dilationY
        layout,
        biasEnabled);
}
1458
1459
// Explicit template instantiations for the dilated DepthwiseConvolution2d
// tests, covering Float32 and the quantised QAsymm8/QSymm16 data types
// (quantised variants use Signed32 biases).
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1501
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001502LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1503 armnn::IWorkloadFactory& workloadFactory,
1504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1505 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001506 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001507{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001508 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001509 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001510}
1511
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001512LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1513 armnn::IWorkloadFactory& workloadFactory,
1514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1515 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001517 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1518 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001519}
1520
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001521LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1522 armnn::IWorkloadFactory& workloadFactory,
1523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1524 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001525 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001526{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001527 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001528 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001529}
1530
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001531LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534{
1535 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1536 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1537
1538 std::vector<float> kernelData;
1539 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1540 for (unsigned int i = 0; i < 64; ++i)
1541 {
1542 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1543 }
1544 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1545 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1546
1547 std::vector<float> expectedOutputData(64, 0.f);
1548 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1549 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1550
1551 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1552 workloadFactory,
1553 memoryManager,
1554 input,
1555 kernel,
1556 boost::multi_array<float, 1>(),
1557 expectedOutput,
1558 0.f,
1559 0,
1560 armnn::DataLayout::NCHW);
1561}
1562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001563LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001567 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001569 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001570 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001571}
1572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001573LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1574 armnn::IWorkloadFactory& workloadFactory,
1575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001577 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001578{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001579 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001580 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001581}
1582
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001583LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1584 armnn::IWorkloadFactory& workloadFactory,
1585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1586 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001587 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001589 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001590 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001591}
1592
Bruno Goncalves22972f02019-04-26 21:03:24 -03001593LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1594 armnn::IWorkloadFactory& workloadFactory,
1595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1596{
1597 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001598 workloadFactory,
1599 memoryManager,
1600 0.f,
1601 0,
1602 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001603}
1604
Ruomei Yan88d44b82019-05-23 14:29:06 +01001605LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 bool biasEnabled,
1609 const armnn::DataLayout layout)
1610{
1611 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1612 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1613}
1614
1615LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1618 bool biasEnabled,
1619 const armnn::DataLayout layout)
1620{
1621 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1622 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1623}
1624
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001625LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001626 armnn::IWorkloadFactory& workloadFactory,
1627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001629 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001630{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001631 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1632 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001633}
1634
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001635LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1636 armnn::IWorkloadFactory& workloadFactory,
1637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1638 armnn::IWorkloadFactory& refWorkloadFactory,
1639 const armnn::DataLayout layout)
1640{
1641 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1642 workloadFactory, memoryManager, refWorkloadFactory, layout);
1643}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001644
1645LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1646 armnn::IWorkloadFactory& workloadFactory,
1647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001648{
1649 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1650 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001651 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001652}
1653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001654LayerTestResult<float,4> SimpleNormalizationWithinTest(
1655 armnn::IWorkloadFactory& workloadFactory,
1656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001657{
1658 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1659 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001660 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001666{
1667 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1668 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001669 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001670}
1671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001672LayerTestResult<float,2> SimpleSoftmaxTest(
1673 armnn::IWorkloadFactory& workloadFactory,
1674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1675 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001676{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001677 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001678}
1679
Francis Murtagh07f21212019-07-23 09:50:50 +01001680LayerTestResult<float,2> SimpleAxisSoftmaxTest(
1681 armnn::IWorkloadFactory& workloadFactory,
1682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1683 float beta,
1684 int axis)
1685{
1686 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, axis);
1687}
1688
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001689LayerTestResult<float,3> Simple3dSoftmaxTest(
1690 armnn::IWorkloadFactory& workloadFactory,
1691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1692 float beta)
1693{
Francis Murtagh07f21212019-07-23 09:50:50 +01001694 Simple3dSoftmaxOutputData data;
1695 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
1696 data.inputShape, data.outputData, data.inputData);
1697}
1698
// Runs softmax along the requested axis of a 3D Float32 tensor. Each switch
// case lays out the same 20 input values so that the softmax'd dimension
// always holds the sequence {17, 16, 15, 14, 1} (and its negated partner);
// the expected outputs are therefore the same five softmax values arranged
// to match the chosen layout.
// NOTE(review): there is no default case - an axis outside [-3, 2] leaves
// inputShape/inputData/outputData empty; presumably callers only pass valid
// axes - TODO confirm.
LayerTestResult<float,3> Simple3dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        // Axis 0 (or -3): softmax across the outermost dimension of {5, 2, 2}.
        case -3:
        case 0:
        {
            inputShape = {5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,

                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        // Axis 1 (or -2): softmax across the middle dimension of {2, 5, 2}.
        case -2:
        case 1:
        {
            inputShape = {2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,

                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        // Axis 2 (or -1): softmax across the innermost dimension of {2, 2, 5}.
        case -1:
        case 2:
        {
            inputShape = {2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta,
                                                             inputShape, outputData, inputData, axis);
}
1792
1793LayerTestResult<float,4> Simple4dSoftmaxTest(
1794 armnn::IWorkloadFactory& workloadFactory,
1795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1796 float beta)
1797{
Francis Murtagh07f21212019-07-23 09:50:50 +01001798 Simple4dSoftmaxData data;
1799 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, data.inputShape,
1800 data.outputData, data.inputData);
1801}
1802
// Runs softmax along the requested axis of a 4D Float32 tensor. Each switch
// case lays out the same 40 input values so that the softmax'd dimension
// always holds the sequence {17, 16, 15, 14, 1} (and its negated partner);
// the expected outputs are the same five softmax values arranged to match
// the chosen layout.
// NOTE(review): there is no default case - an axis outside [-4, 3] leaves
// inputShape/inputData/outputData empty; presumably callers only pass valid
// axes - TODO confirm.
LayerTestResult<float,4> Simple4dAxisSoftmaxTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float beta,
    int axis)
{
    armnn::TensorShape inputShape;
    std::vector<float> inputData;
    std::vector<float> outputData;
    switch (axis)
    {
        // Axis 0 (or -4): softmax across the outermost dimension of {5, 2, 2, 2}.
        case -4:
        case 0:
        {
            inputShape = {5, 2, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f,
                16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f,
                14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.643914213228014f,
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f,
                0.236882800924671f,
                0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.087144312427294f,

                0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f, 7.246299848982885e-08f
            };
            break;
        }
        // Axis 1 (or -3): softmax across the second dimension of {2, 5, 2, 2}.
        case -3:
        case 1:
        {
            inputShape = {2, 5, 2, 2};

            inputData =
            {
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f,
                15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f,


                0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f,
                0.236882800924671f,
                0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f,
                0.087144312427294f,
                0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f,
                0.032058600957022f,
                0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        // Axis 2 (or -2): softmax across the third dimension of {2, 2, 5, 2}.
        case -2:
        case 2:
        {
            inputShape = {2, 2, 5, 2};

            inputData =
            {
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f,
                17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f,
                0.087144312427294f,
                0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f,
                7.246299848982885e-08f
            };
            break;
        }
        // Axis 3 (or -1): softmax across the innermost dimension of {2, 2, 2, 5}.
        case -1:
        case 3:
        {
            inputShape = {2, 2, 2, 5};

            inputData =
            {
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f,
                17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f
            };

            outputData =
            {
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,

                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f,
                0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f,
                7.246299848982885e-08f
            };
            break;
        }
    }

    return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta, inputShape,
                                                             outputData, inputData, axis);
}
1960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001961LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1962 armnn::IWorkloadFactory& workloadFactory,
1963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1964 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001965{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001966 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001967}
1968
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001969LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1970 armnn::IWorkloadFactory& workloadFactory,
1971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1972 float beta)
1973{
Francis Murtagh07f21212019-07-23 09:50:50 +01001974 Simple3dSoftmaxOutputData data;
1975 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1976 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001977}
1978
1979LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1982 float beta)
1983{
Francis Murtagh07f21212019-07-23 09:50:50 +01001984 Simple4dSoftmaxData data;
1985
1986 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta,
1987 data.inputShape, data.outputData, data.inputData);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001988}
1989
nikraj01248683f2019-05-29 16:46:50 +01001990LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1991 armnn::IWorkloadFactory& workloadFactory,
1992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1993 float beta)
1994{
1995 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1996}
1997
1998LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1999 armnn::IWorkloadFactory& workloadFactory,
2000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2001 float beta)
2002{
Francis Murtagh07f21212019-07-23 09:50:50 +01002003 Simple3dSoftmaxOutputData data;
2004 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2005 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002006}
2007
2008LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
2009 armnn::IWorkloadFactory& workloadFactory,
2010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2011 float beta)
2012{
Francis Murtagh07f21212019-07-23 09:50:50 +01002013 Simple4dSoftmaxData data;
2014
2015 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta,
2016 data.inputShape, data.outputData, data.inputData);
nikraj01248683f2019-05-29 16:46:50 +01002017}
2018
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002019LayerTestResult<float,4> CompareNormalizationTest(
2020 armnn::IWorkloadFactory& workloadFactory,
2021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2022 armnn::IWorkloadFactory& refWorkloadFactory,
2023 armnn::NormalizationAlgorithmChannel normChannel,
2024 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00002025{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002026 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00002027}
2028
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002029LayerTestResult<float,2> CompareSoftmaxTest(
2030 armnn::IWorkloadFactory& workloadFactory,
2031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002032 armnn::IWorkloadFactory& refWorkloadFactory,
2033 float beta)
2034{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002035 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
2036 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002037}
2038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002039LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
2040 armnn::IWorkloadFactory& workloadFactory,
2041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002042 armnn::IWorkloadFactory& refWorkloadFactory,
2043 float beta)
2044{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002045 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
2046 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00002047}
2048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002049std::vector<LayerTestResult<float,3>> SplitterTest(
2050 armnn::IWorkloadFactory& workloadFactory,
2051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002052{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002053 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00002054}
2055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002056std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
2057 armnn::IWorkloadFactory& workloadFactory,
2058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002060 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002061}
2062
Ruomei Yan25339c32019-05-28 16:48:20 +01002063std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
2064 armnn::IWorkloadFactory& workloadFactory,
2065 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2066{
2067 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2068}
2069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002070LayerTestResult<float, 3> CopyViaSplitterTest(
2071 armnn::IWorkloadFactory& workloadFactory,
2072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002074 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002075}
2076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002077LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
2078 armnn::IWorkloadFactory& workloadFactory,
2079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002081 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002082}
2083
Ruomei Yan25339c32019-05-28 16:48:20 +01002084LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
2085 armnn::IWorkloadFactory& workloadFactory,
2086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2087{
2088 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
2089}
2090
Jan Eilers38e05bd2019-06-26 13:10:09 +01002091void LstmUtilsZeroVectorTest()
2092{
2093 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
2094 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2095 {2., 3., 3., 4.}));
2096
2097 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
2098 {0., 0., 0., 0.}));
2099
2100 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
2101}
2102
2103void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
2104{
2105 uint32_t batchSize = 2;
2106 uint32_t vecSize = 4;
2107 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2108 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2109 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
2110 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
2111
2112 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2113 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
2114 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
2115
2116 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2117 vecSize, batchSize, expectedOutput);
2118}
2119
2120void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
2121{
2122 uint32_t batchSize = 2;
2123 uint32_t vecSize = 4;
2124 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2125 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2126 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2127 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2128
2129 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2130 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2131 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
2132
2133 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2134 vecSize, batchSize, expectedOutput);
2135}
2136
2137void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
2138{
2139 uint32_t batchSize = 2;
2140 uint32_t vecSize = 4;
2141 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
2142 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2143 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2144 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
2145
2146 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
2147 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
2148 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
2149
2150 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
2151 vecSize, batchSize, expectedOutput);
2152}
2153
2154
// Checks the LstmUtils VectorBatchVectorCwiseProduct helper: a 29-element
// vector is multiplied element-wise into each of the 4 rows of batchVector.
// The batches cover all sign combinations (+ +, - -, alternating) plus a
// trailing zero element.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
            { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
              /* batch 1 */
              -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
              -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
              -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
              /* batch 2 */
              1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
              11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
              21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
              /* batch 3 */
              -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
              -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
              -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expected output: element-wise product vector[i] * batchVector[b][i] for
    // every batch b (e.g. 1.1 * 1.1 = 1.21), so signs follow the batch rows.
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
              59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
              172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
              368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
              637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
              /* batch 1 */
              -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
              -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
              -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
              -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
              -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
              /* batch 2 */
              1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
              59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
              172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
              368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
              637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
              /* batch 3 */
              -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
              -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
              -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
              -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
              -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
2214
2215
2216void LstmUtilsVectorBatchVectorAddTest()
2217{
2218 uint32_t batchSize = 2;
2219 uint32_t vecSize = 3;
2220 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
2221 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
2222 { 0.0f, -0.5f, 1.0f}));
2223
2224 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
2225 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2226 { 1.0f, 2.0f, 3.0f, //batch 0
2227 4.0f, 5.0f, 6.0f})); //batch 1
2228
2229 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
2230 { 1.0f, 1.5f, 4.0f,
2231 4.0f, 4.5f, 7.0f}));
2232
2233 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
2234 vecSize, batchSize, expectedOutput);
2235}
2236
2237
// LSTM layer test: CIFG enabled, peephole connections enabled, no projection.
// Runs a 2x2 Float32 input through the shared test impl and compares against
// the golden 2x4 output below.
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 2., 3., 3., 4. }));

    // Golden reference values — presumably generated from a reference LSTM
    // implementation; TODO confirm provenance.
    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
2253
// LSTM layer test: no CIFG, peephole connections enabled, projection enabled.
// Runs a 2x5 Float32 input through the shared test impl and compares against
// the golden 2x16 output below.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Golden reference values for the projected output.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
2275
// LSTM layer test: basic configuration — no CIFG, no peephole, no projection.
// Runs a 2x2 Float32 input through the shared test impl and compares against
// the golden 2x4 output below.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {2., 3., 3., 4.}));


    // Golden reference values.
    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
              -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
2293
Jan Eilers38e05bd2019-06-26 13:10:09 +01002294
// LSTM layer test: no CIFG, peephole enabled, projection enabled, with layer
// normalisation. Runs a 2x5 Float32 input (two batches) through the shared test
// impl and compares against the golden 2x3 output below.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.7f, 0.8f, 0.1f, 0.2f, 0.3f,     //batch 0
             0.3f, 0.2f, 0.9f, 0.8f, 0.1f}));  //batch 1

    // Golden reference values.
    armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            { 0.0244077f, 0.128027f, -0.00170918f,    //batch 0
             -0.00692428f, 0.0848741f, 0.063445f}));  //batch 1
    return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
2311
2312
// Quantised (QSymm16 activations, QAsymm8 constants) variant of the basic LSTM
// test: no CIFG, no peephole, no projection. Same float reference values as the
// Float32 test, quantised with scale 1 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are quantised differently from the activations.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
                                                                      std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
2336
// Quantised (QSymm16 activations, QAsymm8 constants) variant of the LSTM test
// with CIFG and peephole enabled, no projection. Same float reference values as
// the Float32 test, quantised with scale 1 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are quantised differently from the activations.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>({ 2., 3., 3., 4. })));

    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2360
// Quantised (QSymm16 activations, QAsymm8 constants) variant of the LSTM test
// with peephole and projection enabled, no CIFG. Same float reference values as
// the Float32 test; note the coarser quantisation scale of 2.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    // Weights/biases are quantised differently from the activations.
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2390
// Same as LstmLayerInt16NoCifgNoPeepholeNoProjectionTest, but with the constant
// tensors (weights/biases) also held as QSymm16 rather than QAsymm8.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>{2., 3., 3., 4.}));

    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    // The last argument passes 'datatype' (QSymm16) as the constant data type too.
    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
2412
// QuantizedLstm
// Runs a 2x2 QAsymm8 input through the QuantizedLstm test impl and compares
// against the golden 2x4 QAsymm8 output below (raw quantised byte values).
LayerTestResult<uint8_t, 2> QuantizedLstmTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
    boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
        {166, 179, 50, 150}));

    armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
    boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
        {140, 151, 146, 112, 136, 156, 142, 112 }));

    return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
2428
// Tests the Concat workload: two 3D inputs (2 channels and 1 channel) are
// concatenated along the channel dimension into a 3-channel output. Each
// input's placement in the output is described by a view origin; when the
// backend supports sub-tensors the inputs are created as views directly into
// the output tensor.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Input2 starts at channel 2, immediately after input1's two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // If sub-tensors are supported, the inputs are views into the output at
    // their respective window origins; otherwise standalone handles are used.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Order matters below: allocate, copy inputs in, configure, execute, copy out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
2551
// Tests the Addition workload on two Float32 [2,2,2,3] tensors of identical
// shape: output = input1 + input2, element-wise.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same NCHW shape.
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    // Expected output is the element-wise sum of the two inputs.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Order matters below: allocate, copy inputs in, configure, execute, copy out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2643
// Tests the Addition workload on two Float32 5D tensors of identical shape
// [2,2,2,2,3]: output = input1 + input2, element-wise.
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int depth = 2;
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same 5D shape.
    unsigned int shape[] = {depth, batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(5, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 5>(inputTensorInfo1, std::vector<float>(
        {
            2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
            2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

            2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
            0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


            1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
            1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

            0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
            0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,

        }));

    auto input2 = MakeTensor<float, 5>(inputTensorInfo2, std::vector<float>(
        {
            4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
            1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

            4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
            0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


            0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
            2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

            3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
            2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
        }));

    // Expected output is the element-wise sum of the two inputs.
    LayerTestResult<float, 5> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 5>(outputTensorInfo, std::vector<float>(
        {
            7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
            4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

            7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
            0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


            1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
            3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

            4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
            2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Order matters below: allocate, copy inputs in, configure, execute, copy out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0][0], outputHandle.get());

    return ret;
}
2740
// Shared implementation for broadcast-addition tests: adds a [1,3,2,1] tensor
// to a [1,1,2,3] tensor, broadcasting both to the [1,3,2,3] output shape.
// For quantised T, qScale/qOffset are applied to all three tensor infos;
// for Float32 they are ignored.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantisation parameters only apply to quantised data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: input1 broadcast across width, input2 across channels.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Order matters below: allocate, copy inputs in, configure, execute, copy out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2819
// Shared implementation for single-element broadcast-addition tests: adds a
// [1,1,1,1] scalar tensor to a [1,3,2,3] tensor, broadcasting the scalar over
// every element. For quantised T, qScale/qOffset are applied to all three
// tensor infos; for Float32 they are ignored.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantisation parameters only apply to quantised data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: every element of input1 with the scalar 0.5 added.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Order matters below: allocate, copy inputs in, configure, execute, copy out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2893
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002894LayerTestResult<float, 4> AdditionBroadcastTest(
2895 armnn::IWorkloadFactory& workloadFactory,
2896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002897{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002898 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2899 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002900}
2901
// Broadcast addition for QuantisedAsymm8 data, quantisation scale 2.0 and
// offset 0 forwarded to the shared impl.
LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 2.f, 0);
}
2909
// Broadcast addition for QuantisedSymm16 data, quantisation scale 2.0 and
// offset 0 forwarded to the shared impl.
LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 2.f, 0);
}
2917
// Addition where one operand is a single element broadcast across the other
// tensor, Float32 variant (quantisation arguments unused markers: 0.0f, 0).
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}
2925
// Single-element broadcast addition for QuantisedAsymm8 data; uses an
// asymmetric quantisation (scale 0.1333333, zero point 128).
LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.1333333f, 128);
}
2933
// Single-element broadcast addition for QuantisedSymm16 data; symmetric
// quantisation (scale 0.1333333, offset 0).
LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, 0.1333333f, 0);
}
2941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002942LayerTestResult<float,4> CompareAdditionTest(
2943 armnn::IWorkloadFactory& workloadFactory,
2944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2945 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002946{
2947 unsigned int batchSize = 4;
2948 unsigned int channels = 1;
2949 unsigned int height = 2;
2950 unsigned int width = 3;
2951
2952 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2953 armnn::TensorInfo outputTensorInfo;
2954
2955 unsigned int shape[] = {batchSize, channels, height, width};
2956
2957 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2958 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2959 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2960
2961 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2962 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2963
2964 LayerTestResult<float,4> ret(outputTensorInfo);
2965
2966 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2967 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2968 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2969
2970 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2971 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2972 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2973
2974 armnn::AdditionQueueDescriptor data;
2975 armnn::WorkloadInfo info;
2976 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2977 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2978 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2979
2980 armnn::AdditionQueueDescriptor refData = data;
2981 armnn::WorkloadInfo refInfo = info;
2982 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2983 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2984 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2985
2986 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2987 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2988
2989 inputHandle1->Allocate();
2990 inputHandle2->Allocate();
2991 outputHandle->Allocate();
2992 inputHandle1Ref->Allocate();
2993 inputHandle2Ref->Allocate();
2994 outputHandleRef->Allocate();
2995
2996 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2997 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2998 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2999 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
3000
Derek Lambertif30f7d32019-04-09 10:25:02 +01003001 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003002 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01003003 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003004 workloadRef->Execute();
3005
3006 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3007 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
3008
3009 return ret;
3010}
3011
surmeh01bceff2f2018-03-29 16:29:27 +01003012namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01003013template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003014LayerTestResult<T, 4> DivisionTestHelper(
3015 armnn::IWorkloadFactory& workloadFactory,
3016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3017 const unsigned int shape0[4],
3018 const std::vector<T>& values0,
3019 float scale0,
3020 int32_t offset0,
3021 const unsigned int shape1[4],
3022 const std::vector<T> & values1,
3023 float scale1,
3024 int32_t offset1,
3025 const unsigned int outShape[4],
3026 const std::vector<T> & outValues,
3027 float outScale,
3028 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01003029{
Sadik Armagan2999a022019-04-09 14:20:12 +01003030 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
3031 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
3032 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003033
David Beck5cd01f32018-09-12 16:00:08 +01003034 inputTensorInfo0.SetQuantizationScale(scale0);
3035 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003036
David Beck5cd01f32018-09-12 16:00:08 +01003037 inputTensorInfo1.SetQuantizationScale(scale1);
3038 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003039
David Beck5cd01f32018-09-12 16:00:08 +01003040 outputTensorInfo.SetQuantizationScale(outScale);
3041 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003042
David Beck5cd01f32018-09-12 16:00:08 +01003043 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
3044 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003045
David Beck5cd01f32018-09-12 16:00:08 +01003046 LayerTestResult<T, 4> result(outputTensorInfo);
3047 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003048
David Beck5cd01f32018-09-12 16:00:08 +01003049 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3050 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3051 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003052
David Beck5cd01f32018-09-12 16:00:08 +01003053 armnn::DivisionQueueDescriptor data;
3054 armnn::WorkloadInfo info;
3055 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3056 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3057 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003058
David Beck5cd01f32018-09-12 16:00:08 +01003059 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003060
David Beck5cd01f32018-09-12 16:00:08 +01003061 inputHandle0->Allocate();
3062 inputHandle1->Allocate();
3063 outputHandle->Allocate();
3064
3065 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3066 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3067
Derek Lambertif30f7d32019-04-09 10:25:02 +01003068 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01003069 workload->Execute();
3070
3071 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
3072
3073 return result;
3074}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003075} // anonymous namespace
3076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003077LayerTestResult<float,4> DivisionByZeroTest(
3078 armnn::IWorkloadFactory& workloadFactory,
3079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01003080{
3081 const unsigned int width = 2;
3082 const unsigned int height = 2;
3083 const unsigned int channelCount = 2;
3084 const unsigned int batchSize = 2;
3085
3086 unsigned int shape[] = { batchSize, channelCount, height, width };
3087
3088 std::vector<float> input0({
3089 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
3090 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
3091
3092 std::vector<float> input1({
3093 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
3094 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
3095
3096 std::vector<float> output({
3097 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
3098 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
3099
Sadik Armagan2999a022019-04-09 14:20:12 +01003100 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3101 memoryManager,
3102 shape, input0, 1.0f, 0,
3103 shape, input1, 1.0f, 0,
3104 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01003105}
3106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003107LayerTestResult<float,4> DivisionTest(
3108 armnn::IWorkloadFactory& workloadFactory,
3109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003110{
3111 const unsigned int width = 2;
3112 const unsigned int height = 2;
3113 const unsigned int channelCount = 2;
3114 const unsigned int batchSize = 2;
3115
3116 unsigned int shape[] = { batchSize, channelCount, height, width };
3117
3118 std::vector<float> input0({
3119 2, 2, 2, 2, 3, 3, 3, 3,
3120 4, 4, 4, 4, 5, 5, 5, 5 });
3121
3122 std::vector<float> input1({
3123 1, 1, 1, 1, 2, 2, 2, 2,
3124 4, 4, 4, 4, 4, 4, 4, 4 });
3125
3126 std::vector<float> output({
3127 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
3128 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
3129
David Beck5cd01f32018-09-12 16:00:08 +01003130
Sadik Armagan2999a022019-04-09 14:20:12 +01003131 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3132 memoryManager,
3133 shape, input0, 1.0f, 0,
3134 shape, input1, 1.0f, 0,
3135 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003136}
3137
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003138LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
3139 armnn::IWorkloadFactory& workloadFactory,
3140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003141{
3142 unsigned int shape0[] = { 1, 2, 2, 2 };
3143 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3144
3145 unsigned int shape1[] = { 1, 1, 1, 1 };
3146 std::vector<float> input1({ 2 });
3147
3148 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3149
David Beck5cd01f32018-09-12 16:00:08 +01003150
Sadik Armagan2999a022019-04-09 14:20:12 +01003151 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3152 memoryManager,
3153 shape0, input0, 1.0f, 0,
3154 shape1, input1, 1.0f, 0,
3155 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003156}
3157
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003158LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
3159 armnn::IWorkloadFactory& workloadFactory,
3160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003161{
3162 unsigned int shape0[] = { 1, 3, 3, 2 };
3163 std::vector<float> input0({
3164 1, 4, 3, 8, 5, 12,
3165 7, 16, 9, 20, 11, 24,
3166 13, 28, 15, 32, 17, 36});
3167
3168 unsigned int shape1[] = { 1, 1, 1, 2 };
3169 std::vector<float> input1({ 1, 2 });
3170
3171 std::vector<float> output({
3172 1, 2, 3, 4, 5, 6,
3173 7, 8, 9, 10, 11, 12,
3174 13, 14, 15, 16, 17, 18});
3175
Sadik Armagan2999a022019-04-09 14:20:12 +01003176 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
3177 memoryManager,
3178 shape0, input0, 1.0f, 0,
3179 shape1, input1, 1.0f, 0,
3180 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003181}
3182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003183LayerTestResult<uint8_t,4> DivisionUint8Test(
3184 armnn::IWorkloadFactory& workloadFactory,
3185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003186{
3187 const unsigned int width = 2;
3188 const unsigned int height = 2;
3189 const unsigned int channelCount = 2;
3190 const unsigned int batchSize = 2;
3191
3192 unsigned int shape[] = { batchSize, channelCount, height, width };
3193
3194 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3195 4, 4, 4, 4, 5, 5, 5, 5 });
3196
3197 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3198 4, 4, 4, 4, 4, 4, 4, 4 });
3199
3200 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3201 4, 4, 4, 4, 5, 5, 5, 5});
3202
3203
Sadik Armagan2999a022019-04-09 14:20:12 +01003204 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3205 memoryManager,
3206 shape, input0, 1.0f, 0,
3207 shape, input1, 1.0f, 0,
3208 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003209}
3210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003211LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
3212 armnn::IWorkloadFactory& workloadFactory,
3213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003214{
3215 unsigned int shape0[] = { 1, 2, 2, 2 };
3216 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3217
3218 unsigned int shape1[] = { 1, 1, 1, 1 };
3219 std::vector<uint8_t> input1({ 2 });
3220
3221 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3222
Sadik Armagan2999a022019-04-09 14:20:12 +01003223 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3224 memoryManager,
3225 shape0, input0, 1.0f, 0,
3226 shape1, input1, 1.0f, 0,
3227 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01003228}
3229
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003230LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
3231 armnn::IWorkloadFactory& workloadFactory,
3232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01003233{
3234 unsigned int shape0[] = { 1, 3, 3, 2 };
3235 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
3236 7, 16, 9, 20, 11, 24,
3237 13, 28, 15, 32, 17, 36});
3238
3239 unsigned int shape1[] = { 1, 1, 1, 2 };
3240 std::vector<uint8_t> input1({ 1, 2 });
3241
3242 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
3243 7, 8, 9, 10, 11, 12,
3244 13, 14, 15, 16, 17, 18});
3245
Sadik Armagan2999a022019-04-09 14:20:12 +01003246 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
3247 memoryManager,
3248 shape0, input0, 1.0f, 0,
3249 shape1, input1, 1.0f, 0,
3250 shape0, output, 1.0f, 0);
3251}
3252
3253LayerTestResult<int16_t,4> DivisionInt16Test(
3254 armnn::IWorkloadFactory& workloadFactory,
3255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3256{
3257 unsigned int shape[] = { 2, 2, 2, 2 };
3258
3259 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
3260 4, 4, 4, 4, 5, 5, 5, 5 });
3261
3262 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
3263 4, 4, 4, 4, 4, 4, 4, 4 });
3264
3265 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
3266 4, 4, 4, 4, 5, 5, 5, 5});
3267
3268
3269 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3270 memoryManager,
3271 shape, input0, 1.0f, 0,
3272 shape, input1, 1.0f, 0,
3273 shape, output, 0.25f, 0);
3274}
3275
3276LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
3277 armnn::IWorkloadFactory& workloadFactory,
3278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3279{
3280 unsigned int shape0[] = { 1, 2, 2, 2 };
3281 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
3282
3283 unsigned int shape1[] = { 1, 1, 1, 1 };
3284 std::vector<int16_t> input1({ 2 });
3285
3286 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
3287
3288 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3289 memoryManager,
3290 shape0, input0, 1.0f, 0,
3291 shape1, input1, 1.0f, 0,
3292 shape0, output, 1.0f, 0);
3293}
3294
3295LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
3296 armnn::IWorkloadFactory& workloadFactory,
3297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3298{
3299 unsigned int shape0[] = { 1, 3, 3, 2 };
3300 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
3301 7, 16, 9, 20, 11, 24,
3302 13, 28, 15, 32, 17, 36});
3303
3304 unsigned int shape1[] = { 1, 1, 1, 2 };
3305 std::vector<int16_t> input1({ 1, 2 });
3306
3307 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
3308 7, 8, 9, 10, 11, 12,
3309 13, 14, 15, 16, 17, 18});
3310
3311 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
3312 memoryManager,
3313 shape0, input0, 1.0f, 0,
3314 shape1, input1, 1.0f, 0,
3315 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01003316}
3317
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003318template<typename DescriptorType>
3319std::unique_ptr<armnn::IWorkload> CreateWorkload(
3320 const armnn::IWorkloadFactory& workloadFactory,
3321 const armnn::WorkloadInfo& info,
3322 const DescriptorType& descriptor)
3323{
3324 return CreateWorkload(workloadFactory, info, descriptor);
3325};
3326
// CreateWorkload specialization for Maximum: forwards to CreateMaximum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
3335
// CreateWorkload specialization for Minimum: forwards to CreateMinimum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
3344
// CreateWorkload specialization for Equal: forwards to CreateEqual.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
3353
// CreateWorkload specialization for Greater: forwards to CreateGreater.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
3362
namespace {

// Shared driver for binary elementwise workloads (Equal, Greater, Maximum,
// Minimum, ...). Builds two input tensors and one output tensor, creates the
// workload through the CreateWorkload<Descriptor> specializations defined
// above, runs it, and returns the actual output alongside the expected
// outValues for comparison by the caller.
//
// ArmnnTypeInput and ArmnnTypeOutput may differ: comparison workloads take
// Float32/QuantisedAsymm8 inputs but produce a Boolean (uint8_t) tensor.
// Broadcasting is exercised by passing different shape0/shape1.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const uint32_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantisation parameters only apply when the input element type is
    // quantised. NOTE(review): they are set after MakeTensor — presumably
    // MakeTensor only consumes the shape; confirm if that ever changes.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Mark Boolean results so the checking code compares them as booleans.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    // ExecuteWorkload acquires/releases the backend memory manager around the
    // run (see WorkloadTestUtils.hpp).
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose input and output element types are
// identical; forwards with ArmnnT used for both.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
3454
3455LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003457{
3458 const unsigned int width = 2;
3459 const unsigned int height = 2;
3460 const unsigned int channelCount = 2;
3461 const unsigned int batchSize = 2;
3462
3463 unsigned int shape[] = { batchSize, channelCount, height, width };
3464
3465 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3466 3, 3, 3, 3, 4, 4, 4, 4 });
3467
3468 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3469 5, 5, 5, 5, 4, 4, 4, 4 });
3470
kevmay012b4d88e2019-01-24 14:05:09 +00003471 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3472 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003473
kevmay012b4d88e2019-01-24 14:05:09 +00003474 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003475 workloadFactory,
3476 memoryManager,
3477 shape,
3478 input0,
3479 shape,
3480 input1,
3481 shape,
3482 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003483}
3484
kevmay012b4d88e2019-01-24 14:05:09 +00003485LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003486 armnn::IWorkloadFactory& workloadFactory,
3487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3488{
3489 unsigned int shape0[] = { 1, 2, 2, 2 };
3490 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3491
3492 unsigned int shape1[] = { 1, 1, 1, 1 };
3493 std::vector<float> input1({ 1 });
3494
kevmay012b4d88e2019-01-24 14:05:09 +00003495 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003496
kevmay012b4d88e2019-01-24 14:05:09 +00003497 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003498 workloadFactory,
3499 memoryManager,
3500 shape0,
3501 input0,
3502 shape1,
3503 input1,
3504 shape0,
3505 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003506}
3507
kevmay012b4d88e2019-01-24 14:05:09 +00003508LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003509 armnn::IWorkloadFactory& workloadFactory,
3510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3511{
3512 const unsigned int shape0[] = { 1, 2, 2, 3 };
3513 const unsigned int shape1[] = { 1, 1, 1, 3 };
3514
3515 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3516 7, 8, 9, 10, 11, 12 });
3517
3518 std::vector<float> input1({ 1, 2, 3});
3519
kevmay012b4d88e2019-01-24 14:05:09 +00003520 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3521 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003522
kevmay012b4d88e2019-01-24 14:05:09 +00003523 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003524 workloadFactory,
3525 memoryManager,
3526 shape0,
3527 input0,
3528 shape1,
3529 input1,
3530 shape0,
3531 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003532}
3533
3534LayerTestResult<uint8_t, 4> EqualUint8Test(
3535 armnn::IWorkloadFactory& workloadFactory,
3536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3537{
3538 unsigned int shape[] = { 2, 2, 2, 2 };
3539
3540 // See dequantized values to the right.
3541 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003542 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003543
3544 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3545 3, 3, 3, 3, 5, 5, 5, 5 });
3546
3547 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3548 1, 1, 1, 1, 0, 0, 0, 0 });
3549
kevmay012b4d88e2019-01-24 14:05:09 +00003550 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3551 armnn::DataType::QuantisedAsymm8,
3552 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003553 workloadFactory,
3554 memoryManager,
3555 shape,
3556 input0,
3557 shape,
3558 input1,
3559 shape,
3560 output,
3561 1.0f,
3562 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003563}
3564
3565LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3566 armnn::IWorkloadFactory& workloadFactory,
3567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3568{
3569 const unsigned int shape0[] = { 1, 2, 2, 3 };
3570 const unsigned int shape1[] = { 1, 1, 1, 1 };
3571
3572 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3573 7, 8, 9, 10, 11, 12 });
3574
3575 std::vector<uint8_t> input1({ 1 });
3576
3577 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3578 0, 0, 0, 0, 0, 0 });
3579
kevmay012b4d88e2019-01-24 14:05:09 +00003580 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3581 armnn::DataType::QuantisedAsymm8,
3582 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003583 workloadFactory,
3584 memoryManager,
3585 shape0,
3586 input0,
3587 shape1,
3588 input1,
3589 shape0,
3590 output,
3591 1.0f,
3592 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003593}
3594
3595LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3596 armnn::IWorkloadFactory& workloadFactory,
3597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3598{
3599 const unsigned int shape0[] = { 1, 2, 2, 3 };
3600 const unsigned int shape1[] = { 1, 1, 1, 3 };
3601
3602 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3603 7, 8, 9, 10, 11, 12 });
3604
3605 std::vector<uint8_t> input1({ 1, 1, 3});
3606
3607 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3608 0, 0, 0, 0, 0, 0 });
3609
kevmay012b4d88e2019-01-24 14:05:09 +00003610 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3611 armnn::DataType::QuantisedAsymm8,
3612 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003613 workloadFactory,
3614 memoryManager,
3615 shape0,
3616 input0,
3617 shape1,
3618 input1,
3619 shape0,
3620 output,
3621 1.0f,
3622 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003623}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003624
kevmay012b4d88e2019-01-24 14:05:09 +00003625LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3627{
3628 const unsigned int width = 2;
3629 const unsigned int height = 2;
3630 const unsigned int channelCount = 2;
3631 const unsigned int batchSize = 2;
3632
3633 unsigned int shape[] = { batchSize, channelCount, height, width };
3634
3635 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3636 3, 3, 3, 3, 4, 4, 4, 4 });
3637
3638 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3639 5, 5, 5, 5, 4, 4, 4, 4 });
3640
kevmay012b4d88e2019-01-24 14:05:09 +00003641 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3642 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003643
kevmay012b4d88e2019-01-24 14:05:09 +00003644 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003645 workloadFactory,
3646 memoryManager,
3647 shape,
3648 input0,
3649 shape,
3650 input1,
3651 shape,
3652 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003653}
3654
kevmay012b4d88e2019-01-24 14:05:09 +00003655LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003656 armnn::IWorkloadFactory& workloadFactory,
3657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3658{
3659 unsigned int shape0[] = { 1, 2, 2, 2 };
3660 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3661
3662 unsigned int shape1[] = { 1, 1, 1, 1 };
3663 std::vector<float> input1({ 1 });
3664
kevmay012b4d88e2019-01-24 14:05:09 +00003665 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003666
kevmay012b4d88e2019-01-24 14:05:09 +00003667 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003668 workloadFactory,
3669 memoryManager,
3670 shape0,
3671 input0,
3672 shape1,
3673 input1,
3674 shape0,
3675 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003676}
3677
kevmay012b4d88e2019-01-24 14:05:09 +00003678LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003679 armnn::IWorkloadFactory& workloadFactory,
3680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3681{
3682 const unsigned int shape0[] = { 1, 2, 2, 3 };
3683 const unsigned int shape1[] = { 1, 1, 1, 3 };
3684
3685 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3686 7, 8, 9, 10, 11, 12 });
3687
3688 std::vector<float> input1({ 1, 3, 2});
3689
kevmay012b4d88e2019-01-24 14:05:09 +00003690 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3691 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003692
kevmay012b4d88e2019-01-24 14:05:09 +00003693 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003694 workloadFactory,
3695 memoryManager,
3696 shape0,
3697 input0,
3698 shape1,
3699 input1,
3700 shape0,
3701 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003702}
3703
3704LayerTestResult<uint8_t, 4> GreaterUint8Test(
3705 armnn::IWorkloadFactory& workloadFactory,
3706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3707{
3708 unsigned int shape[] = { 2, 2, 2, 2 };
3709
3710 // See dequantized values to the right.
3711 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3712 3, 3, 3, 3, 5, 5, 5, 5 });
3713
3714 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3715 2, 2, 2, 2, 5, 5, 5, 5 });
3716
3717 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3718 1, 1, 1, 1, 0, 0, 0, 0 });
3719
kevmay012b4d88e2019-01-24 14:05:09 +00003720 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3721 armnn::DataType::QuantisedAsymm8,
3722 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003723 workloadFactory,
3724 memoryManager,
3725 shape,
3726 input0,
3727 shape,
3728 input1,
3729 shape,
3730 output,
3731 1.0f,
3732 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003733}
3734
3735LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3736 armnn::IWorkloadFactory& workloadFactory,
3737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3738{
3739 const unsigned int shape0[] = { 1, 2, 2, 3 };
3740 const unsigned int shape1[] = { 1, 1, 1, 1 };
3741
3742 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3743 7, 8, 9, 10, 11, 12 });
3744
3745 std::vector<uint8_t> input1({ 1 });
3746
3747 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3748 1, 1, 1, 1, 1, 1 });
3749
kevmay012b4d88e2019-01-24 14:05:09 +00003750 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3751 armnn::DataType::QuantisedAsymm8,
3752 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003753 workloadFactory,
3754 memoryManager,
3755 shape0,
3756 input0,
3757 shape1,
3758 input1,
3759 shape0,
3760 output,
3761 1.0f,
3762 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003763}
3764
3765LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3766 armnn::IWorkloadFactory& workloadFactory,
3767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3768{
3769 const unsigned int shape0[] = { 1, 2, 2, 3 };
3770 const unsigned int shape1[] = { 1, 1, 1, 3 };
3771
3772 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3773 7, 8, 9, 10, 11, 12 });
3774
3775 std::vector<uint8_t> input1({ 1, 1, 3});
3776
3777 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3778 1, 1, 1, 1, 1, 1 });
3779
kevmay012b4d88e2019-01-24 14:05:09 +00003780 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3781 armnn::DataType::QuantisedAsymm8,
3782 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003783 workloadFactory,
3784 memoryManager,
3785 shape0,
3786 input0,
3787 shape1,
3788 input1,
3789 shape0,
3790 output,
3791 1.0f,
3792 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003793}
3794
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003795LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3797{
3798 const unsigned int width = 2;
3799 const unsigned int height = 2;
3800 const unsigned int channelCount = 2;
3801 const unsigned int batchSize = 2;
3802
3803 unsigned int shape[] = { batchSize, channelCount, height, width };
3804
3805 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3806 3, 3, 3, 3, 4, 4, 4, 4 });
3807
3808 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3809 4, 4, 4, 4, 5, 5, 5, 5 });
3810
3811 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3812 4, 4, 4, 4, 5, 5, 5, 5 });
3813
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003814 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3815 workloadFactory,
3816 memoryManager,
3817 shape,
3818 input0,
3819 shape,
3820 input1,
3821 shape,
3822 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003823}
3824
3825LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3826 armnn::IWorkloadFactory& workloadFactory,
3827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3828{
3829 unsigned int shape0[] = { 1, 2, 2, 2 };
3830 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3831
3832 unsigned int shape1[] = { 1, 1, 1, 1 };
3833 std::vector<float> input1({ 2 });
3834
3835 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3836
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003837 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3838 workloadFactory,
3839 memoryManager,
3840 shape0,
3841 input0,
3842 shape1,
3843 input1,
3844 shape0,
3845 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003846}
3847
3848LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3849 armnn::IWorkloadFactory& workloadFactory,
3850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3851{
3852 const unsigned int shape0[] = { 1, 2, 2, 3 };
3853 const unsigned int shape1[] = { 1, 1, 1, 3 };
3854
3855 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3856 7, 8, 9, 10, 11, 12 });
3857
3858 std::vector<float> input1({ 1, 2, 3});
3859
3860 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003861 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003862
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003863 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3864 workloadFactory,
3865 memoryManager,
3866 shape0,
3867 input0,
3868 shape1,
3869 input1,
3870 shape0,
3871 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003872}
3873
3874LayerTestResult<uint8_t, 4> MaximumUint8Test(
3875 armnn::IWorkloadFactory& workloadFactory,
3876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3877{
3878 unsigned int shape[] = { 2, 2, 2, 2 };
3879
3880 // See dequantized values to the right.
3881 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3882 3, 3, 3, 3, 4, 4, 4, 4 });
3883
3884 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3885 4, 4, 4, 4, 5, 5, 5, 5 });
3886
3887 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3888 4, 4, 4, 4, 5, 5, 5, 5 });
3889
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003890 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3891 workloadFactory,
3892 memoryManager,
3893 shape,
3894 input0,
3895 shape,
3896 input1,
3897 shape,
3898 output,
3899 1.0f,
3900 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003901}
3902
3903LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3904 armnn::IWorkloadFactory& workloadFactory,
3905 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3906{
3907 const unsigned int shape0[] = { 1, 2, 2, 3 };
3908 const unsigned int shape1[] = { 1, 1, 1, 1 };
3909
3910 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3911 7, 8, 9, 10, 11, 12 });
3912
3913 std::vector<uint8_t> input1({2});
3914
3915 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3916 7, 8, 9, 10, 11, 12 });
3917
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003918 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3919 workloadFactory,
3920 memoryManager,
3921 shape0,
3922 input0,
3923 shape1,
3924 input1,
3925 shape0,
3926 output,
3927 1.0f,
3928 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003929}
3930
3931LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3932 armnn::IWorkloadFactory& workloadFactory,
3933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3934{
3935 const unsigned int shape0[] = { 1, 2, 2, 3 };
3936 const unsigned int shape1[] = { 1, 1, 1, 3 };
3937
3938 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3939 7, 8, 9, 10, 11, 12 });
3940
3941 std::vector<uint8_t> input1({ 1, 10, 3});
3942
3943 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3944 7, 10, 9, 10, 11, 12 });
3945
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003946 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3947 workloadFactory,
3948 memoryManager,
3949 shape0,
3950 input0,
3951 shape1,
3952 input1,
3953 shape0,
3954 output,
3955 1.0f,
3956 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003957}
3958
Sadik Armagan2999a022019-04-09 14:20:12 +01003959LayerTestResult<int16_t, 4> MaximumInt16Test(
3960 armnn::IWorkloadFactory& workloadFactory,
3961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3962{
3963 unsigned int shape[] = { 2, 2, 2, 2 };
3964
3965 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3966 3, 3, 3, 3, 4, 4, 4, 4 });
3967
3968 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3969 4, 4, 4, 4, 5, 5, 5, 5 });
3970
3971 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3972 4, 4, 4, 4, 5, 5, 5, 5 });
3973
3974 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3975 workloadFactory,
3976 memoryManager,
3977 shape,
3978 input0,
3979 shape,
3980 input1,
3981 shape,
3982 output,
3983 1.0f,
3984 0);
3985}
3986
3987LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3988 armnn::IWorkloadFactory& workloadFactory,
3989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3990{
3991 const unsigned int shape0[] = { 1, 2, 2, 3 };
3992 const unsigned int shape1[] = { 1, 1, 1, 1 };
3993
3994 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3995 7, 8, 9, 10, 11, 12 });
3996
3997 std::vector<int16_t> input1({2});
3998
3999 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
4000 7, 8, 9, 10, 11, 12 });
4001
4002 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4003 workloadFactory,
4004 memoryManager,
4005 shape0,
4006 input0,
4007 shape1,
4008 input1,
4009 shape0,
4010 output,
4011 1.0f,
4012 0);
4013}
4014
4015LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
4016 armnn::IWorkloadFactory& workloadFactory,
4017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4018{
4019 const unsigned int shape0[] = { 1, 2, 2, 3 };
4020 const unsigned int shape1[] = { 1, 1, 1, 3 };
4021
4022 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4023 7, 8, 9, 10, 11, 12 });
4024
4025 std::vector<int16_t> input1({ 1, 10, 3});
4026
4027 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
4028 7, 10, 9, 10, 11, 12 });
4029
4030 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4031 workloadFactory,
4032 memoryManager,
4033 shape0,
4034 input0,
4035 shape1,
4036 input1,
4037 shape0,
4038 output,
4039 1.0f,
4040 0);
4041}
4042
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004043LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
4044 armnn::IWorkloadFactory& workloadFactory,
4045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4046{
4047 unsigned int shape0[] = { 1, 2, 2, 2 };
4048 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4049
4050 unsigned int shape1[] = { 1, 1, 1, 1 };
4051 std::vector<float> input1({ 2 });
4052
4053 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
4054
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004055 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
4056 workloadFactory,
4057 memoryManager,
4058 shape0,
4059 input0,
4060 shape1,
4061 input1,
4062 shape0,
4063 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004064}
4065
4066
4067LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
4068 armnn::IWorkloadFactory& workloadFactory,
4069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4070{
4071 unsigned int shape0[] = { 1, 2, 2, 2 };
4072 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
4073
4074 unsigned int shape1[] = { 1, 1, 1, 1 };
4075 std::vector<float> input1({ 5 });
4076
4077 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
4078
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004079 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
4080 workloadFactory,
4081 memoryManager,
4082 shape0,
4083 input0,
4084 shape1,
4085 input1,
4086 shape0,
4087 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004088}
4089
4090LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
4091 armnn::IWorkloadFactory & workloadFactory,
4092 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
4093{
4094 const unsigned int shape0[] = { 1, 2, 2, 3 };
4095 const unsigned int shape1[] = { 1, 1, 1, 3 };
4096
4097 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
4098 7, 1, 2, 3, 4, 5 });
4099
4100 std::vector<uint8_t> input1({ 1, 2, 3});
4101
4102 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
4103 1, 1, 2, 1, 2, 3 });
4104
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004105 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
4106 workloadFactory,
4107 memoryManager,
4108 shape0,
4109 input0,
4110 shape1,
4111 input1,
4112 shape0,
4113 output,
4114 1.0f,
4115 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00004116}
4117
Sadik Armagan2999a022019-04-09 14:20:12 +01004118LayerTestResult<int16_t, 4> MinimumInt16Test(
4119 armnn::IWorkloadFactory& workloadFactory,
4120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4121{
4122 unsigned int shape[] = { 2, 2, 2, 2 };
4123
4124 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
4125 3, 3, 3, 3, 4, 4, 4, 4 });
4126
4127 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
4128 4, 4, 4, 4, 5, 5, 5, 5 });
4129
4130 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
4131 3, 3, 3, 3, 4, 4, 4, 4 });
4132
4133 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4134 workloadFactory,
4135 memoryManager,
4136 shape,
4137 input0,
4138 shape,
4139 input1,
4140 shape,
4141 output,
4142 1.0f,
4143 0);
4144}
4145
4146LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
4147 armnn::IWorkloadFactory& workloadFactory,
4148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4149{
4150 const unsigned int shape0[] = { 1, 2, 2, 3 };
4151 const unsigned int shape1[] = { 1, 1, 1, 1 };
4152
4153 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4154 7, 8, 9, 10, 11, 12 });
4155
4156 std::vector<int16_t> input1({2});
4157
4158 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
4159 2, 2, 2, 2, 2, 2 });
4160
4161 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4162 workloadFactory,
4163 memoryManager,
4164 shape0,
4165 input0,
4166 shape1,
4167 input1,
4168 shape0,
4169 output,
4170 1.0f,
4171 0);
4172}
4173
4174LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
4175 armnn::IWorkloadFactory& workloadFactory,
4176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4177{
4178 const unsigned int shape0[] = { 1, 2, 2, 3 };
4179 const unsigned int shape1[] = { 1, 1, 1, 3 };
4180
4181 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
4182 7, 8, 9, 10, 11, 12 });
4183
4184 std::vector<int16_t> input1({ 1, 10, 3});
4185
4186 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
4187 1, 8, 3, 1, 10, 3 });
4188
4189 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
4190 workloadFactory,
4191 memoryManager,
4192 shape0,
4193 input0,
4194 shape1,
4195 input1,
4196 shape0,
4197 output,
4198 1.0f,
4199 0);
4200}
4201
namespace {
// Driver shared by the float Multiplication layer tests.
//
// Builds two Float32 input tensors and the expected output from raw
// shape/value arrays, creates and runs a Multiplication workload from the
// given factory, and returns a LayerTestResult holding both the actual
// output (read back from the workload) and the expected output (built from
// outValues) for the caller to compare.
//
// NumDims      - tensor rank; all three shape arrays must have this length.
// memoryManager is accepted for signature uniformity with the other test
//               helpers but is not referenced in this function body.
template<std::size_t NumDims>
LayerTestResult<float,NumDims> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[NumDims],
    const std::vector<float> & values0,
    const unsigned int shape1[NumDims],
    const std::vector<float> & values1,
    const unsigned int outShape[NumDims],
    const std::vector<float> & outValues)
{
    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{NumDims, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, NumDims>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, NumDims>(inputTensorInfo1, values1);

    LayerTestResult<float,NumDims> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the two inputs and one output into the queue descriptor before
    // the workload is created.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Handles must be allocated before data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());

    ret.outputExpected = MakeTensor<float, NumDims>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
4251
4252
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004253LayerTestResult<float,4> MultiplicationTest(
4254 armnn::IWorkloadFactory& workloadFactory,
4255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004256{
4257 const unsigned int width = 2;
4258 const unsigned int height = 2;
4259 const unsigned int channelCount = 2;
4260 const unsigned int batchSize = 2;
4261
4262 unsigned int shape[] = { batchSize, channelCount, height, width };
4263
4264 std::vector<float> input0({
4265 1, 1, 1, 1, 2, 2, 2, 2,
4266 3, 3, 3, 3, 4, 4, 4, 4 });
4267
4268 std::vector<float> input1({
4269 2, 2, 2, 2, 3, 3, 3, 3,
4270 4, 4, 4, 4, 5, 5, 5, 5 });
4271
4272 std::vector<float> output({
4273 2, 2, 2, 2, 6, 6, 6, 6,
4274 12, 12, 12, 12, 20, 20, 20, 20 });
4275
Matthew Jacksondba634f2019-08-15 15:14:18 +01004276 return MultiplicationTestHelper<4>(workloadFactory,
4277 memoryManager,
4278 shape,
4279 input0,
4280 shape,
4281 input1,
4282 shape,
4283 output);
4284}
4285
// Element-wise multiplication of two identically-shaped 5D Float32 tensors.
// Exercises the rank-5 path of MultiplicationTestHelper; the expected values
// are the hand-computed per-element products of input0 and input1.
LayerTestResult<float,5> Multiplication5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 3;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;
    const unsigned int depth = 2;

    // Rank-5 shape: depth is the outermost dimension.
    unsigned int shape[] = { depth, batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.80f, 0.20f, 2.30f, 1.30f, 2.10f, 1.00f,
        2.60f, 0.60f, 2.10f, 2.30f, 2.30f, 2.00f,

        2.50f, 1.00f, 2.90f, 3.10f, 1.50f, 2.40f,
        2.80f, 1.10f, 1.00f, 3.20f, 1.00f, 2.30f,


        0.30f, 2.20f, 1.00f, 0.20f, 1.60f, 1.40f,
        0.80f, 3.20f, 0.10f, 0.10f, 3.10f, 2.10f,

        1.50f, 2.40f, 1.40f, 0.70f, 2.40f, 1.40f,
        1.60f, 1.20f, 1.90f, 0.80f, 0.00f, 0.10f,
    });

    std::vector<float> input1({
        0.70f, 1.00f, 2.90f, 2.20f, 3.10f, 2.80f,
        1.80f, 2.00f, 0.50f, 2.30f, 1.20f, 2.70f,

        2.40f, 0.20f, 3.20f, 1.60f, 0.20f, 2.50f,
        2.30f, 0.70f, 2.70f, 1.80f, 2.90f, 2.70f,


        3.20f, 3.20f, 0.70f, 1.90f, 2.70f, 2.50f,
        2.40f, 0.90f, 2.30f, 1.80f, 2.50f, 2.00f,

        1.60f, 2.20f, 1.60f, 2.00f, 0.30f, 3.20f,
        0.40f, 3.00f, 2.60f, 0.30f, 0.00f, 2.50f,
    });

    // output[i] == input0[i] * input1[i] for every element.
    std::vector<float> output({
        1.26f, 0.20f, 6.67f, 2.86f, 6.51f, 2.80f,
        4.68f, 1.20f, 1.05f, 5.29f, 2.76f, 5.40f,

        6.00f, 0.20f, 9.28f, 4.96f, 0.30f, 6.00f,
        6.44f, 0.77f, 2.70f, 5.76f, 2.90f, 6.21f,


        0.96f, 7.04f, 0.70f, 0.38f, 4.32f, 3.50f,
        1.92f, 2.88f, 0.23f, 0.18f, 7.75f, 4.20f,

        2.40f, 5.28f, 2.24f, 1.40f, 0.72f, 4.48f,
        0.64f, 3.60f, 4.94f, 0.24f, 0.00f, 0.25f,
    });

    return MultiplicationTestHelper<5>(workloadFactory,
                                       memoryManager,
                                       shape,
                                       input0,
                                       shape,
                                       input1,
                                       shape,
                                       output);
}
4352
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004353LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
4354 armnn::IWorkloadFactory& workloadFactory,
4355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004356{
4357 unsigned int shape0[] = { 1, 2, 2, 2 };
4358 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
4359
4360 unsigned int shape1[] = { 1, 1, 1, 1 };
4361 std::vector<float> input1({ 2 });
4362
4363 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
4364
Matthew Jacksondba634f2019-08-15 15:14:18 +01004365 return MultiplicationTestHelper<4>(workloadFactory,
4366 memoryManager,
4367 shape0,
4368 input0,
4369 shape1,
4370 input1,
4371 shape0,
4372 output);
surmeh01bceff2f2018-03-29 16:29:27 +01004373}
4374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004375LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
4376 armnn::IWorkloadFactory& workloadFactory,
4377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01004378{
4379 unsigned int shape0[] = { 1, 3, 3, 2 };
4380 std::vector<float> input0({
4381 1, 2, 3, 4, 5, 6,
4382 7, 8, 9, 10, 11, 12,
4383 13, 14, 15, 16, 17, 18});
4384
4385 unsigned int shape1[] = { 1, 1, 1, 2 };
4386 std::vector<float> input1({ 1, 2 });
4387
4388 std::vector<float> output({
4389 1, 4, 3, 8, 5, 12,
4390 7, 16, 9, 20, 11, 24,
4391 13, 28, 15, 32, 17, 36});
4392
Matthew Jacksondba634f2019-08-15 15:14:18 +01004393 return MultiplicationTestHelper<4>(workloadFactory,
4394 memoryManager,
4395 shape0,
4396 input0,
4397 shape1,
4398 input1,
4399 shape0,
4400 output);
surmeh01bceff2f2018-03-29 16:29:27 +01004401}
telsoa014fcda012018-03-09 14:13:49 +00004402
// Cross-backend comparison test: runs the same Multiplication workload on the
// backend under test (workloadFactory) and on a reference backend
// (refWorkloadFactory) with identical random inputs, and returns both outputs
// so the caller can assert they agree.
//
// memoryManager is accepted for signature uniformity with the other tests but
// is not referenced in this function body.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the "random" inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Mirror set of handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Copy the descriptor, then re-point its handles at the reference backend's.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Handles must be allocated before data is copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive the same input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    // "output" holds the backend-under-test result, "outputExpected" the
    // reference backend's result; the caller compares the two.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
4472
// Runs the same pseudo-randomly initialised BatchNormalization on the backend
// under test ('workloadFactory') and on a reference backend
// ('refWorkloadFactory'), returning both outputs so the caller can compare
// them. Fixed seeds keep the comparison deterministic.
// NOTE(review): 'memoryManager' is accepted for signature consistency with the
// other comparison tests but is not used by this implementation.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // NCHW tensor: 3 batches x 5 channels x (3 x 2) spatial.
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    // The mean/variance/beta/gamma parameters are 1D: one value per channel.
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // Variance is generated with a lower bound of 0.0f (it must be non-negative).
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Separate handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares descriptor/parameters but uses its own
    // tensor handles, bound below.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result; 'outputExpected' holds the
    // reference result. The caller asserts the two match.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
4555
// Permutes 'inputData' (described by 'inputTensorInfo') according to
// 'mappings' by running a Permute workload on the given factory. On return,
// 'outputData' holds the permuted elements and 'inputTensorInfo' has been
// overwritten with the permuted tensor shape.
// NOTE(review): 'memoryManager' is unused by this implementation.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted shape back to the caller via the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
4599
Jim Flynn825af452019-05-20 12:49:28 +01004600armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004601 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4602 unsigned int concatDim)
4603{
telsoa014fcda012018-03-09 14:13:49 +00004604 std::vector<armnn::TensorShape> shapes;
4605 shapes.reserve(inputTensorInfos.size());
4606 for (const armnn::TensorInfo& it: inputTensorInfos)
4607 {
4608 shapes.push_back(it.GetShape());
4609 }
surmeh013537c2c2018-05-18 16:31:43 +01004610
Jim Flynn825af452019-05-20 12:49:28 +01004611 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4612 shapes.end(),
4613 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004614}
4615
//
// Concatenation is only supported for the N and C dimensions of NCHW tensors,
// and for the innermost dimension. For tensors with fewer than 4 dimensions we
// therefore need to make sure that the concatenation dimension is either the
// innermost one or at least the 3rd slowest-iterating one.
//
4621
4622bool NeedPermuteForConcat(
4623 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4624 unsigned int concatDim)
4625{
4626 // See note above. Additionally we expect the input shapes to have the
4627 // same number of dimensions.
4628 unsigned int nDimensions = 0;
4629
telsoa01c577f2c2018-08-31 09:22:23 +01004630 // Determine the number of dimensions as well as sanity check them
4631 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01004632 for (auto && tensorInfo : inputTensorInfos)
4633 {
4634 if (!nDimensions)
4635 {
4636 nDimensions = tensorInfo.GetShape().GetNumDimensions();
4637 }
4638 else
4639 {
4640 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
4641 "Input shapes must have the same number of dimensions");
4642 }
4643 }
4644
narpra015cdda352018-11-19 15:30:27 +00004645 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01004646}
4647
4648armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4649{
4650 unsigned int numDims = inputShape.GetNumDimensions();
4651 if (numDims >= 3)
4652 {
4653 // Nothing to do if the inputShape has at least 3 dimensions.
4654 return inputShape;
4655 }
4656
4657 std::vector<unsigned int> newDims(size_t(3), 1u);
4658 unsigned int expandedBy = 3 - numDims;
4659 for (unsigned int i=0; i<numDims; ++i)
4660 {
4661 newDims[expandedBy+i] = inputShape[i];
4662 }
4663 return armnn::TensorShape(3u, &newDims[0]);
4664}
4665
4666void Generate3dPermuteVectorForConcat(
4667 unsigned int numDimensions,
4668 unsigned int & concatDim,
4669 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4670{
4671 BOOST_ASSERT_MSG(numDimensions <= 3,
4672 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004673 unsigned int expandedBy = 3 - numDimensions;
4674 unsigned int expandedConcatAxis = concatDim + expandedBy;
4675
4676 if (expandedConcatAxis == 2)
4677 {
4678 concatDim = 0;
4679 armnn::PermutationVector forwardPermutation({1, 2, 0});
4680 armnn::PermutationVector reversePermutation({2, 0, 1});
4681 permutations = std::make_pair(forwardPermutation, reversePermutation);
4682 }
4683 else if (expandedConcatAxis == 1)
4684 {
4685 concatDim = 0;
4686 armnn::PermutationVector forwardPermutation({2, 0, 1});
4687 armnn::PermutationVector reversePermutation({1, 2, 0});
4688 permutations = std::make_pair(forwardPermutation, reversePermutation);
4689 }
4690 else
4691 {
4692 BOOST_ASSERT(expandedConcatAxis == 0);
4693 concatDim = 0;
4694 }
4695}
4696
4697//
4698// Permute the input tensors so we can do a supported concatenation.
4699// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
4700// at the front. Finally this function tells what the output shape
4701// of the permuted concatenated tensor is going to be.
4702//
// In/out contract:
//   - inputTensorInfos/inputData are rewritten in place to describe the
//     permuted (3d-expanded) inputs; inputDataStorage owns the permuted data.
//   - permuteVector receives the REVERSE permutation, to be applied to the
//     concatenated output afterwards (see PermuteOutputForConcat).
//   - concatDim is rewritten to the axis to concatenate along post-permute.
//   - outputTensorInfo's shape is rewritten to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the permutation for all of them.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D then permute; the permuted data replaces the original
        // pointer/info for this input.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    // The output shape is the forward-permuted, 3d-expanded original shape.
    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4765
4766
//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the
// output of the concatenation back so it can be checked against an expected
// output.
//
// Applies 'permuteVector' to the concatenated result held in
// 'inputDataHandle' (described by 'tensorInfo') and writes the permuted
// elements to 'data'. Takes ownership of the tensor handle.
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the raw concatenation result off the device handle first.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
4805
4806template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004807void Concatenate(
4808 armnn::IWorkloadFactory& workloadFactory,
4809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4810 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4811 std::initializer_list<T *> inputsOrig,
4812 const armnn::TensorInfo& outputTensorInfoOrig,
4813 T * output,
narpra015cdda352018-11-19 15:30:27 +00004814 unsigned int concatDim,
4815 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004816{
4817 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4818 if (output == nullptr)
4819 {
4820 // Nullptr is an error in the test. By returning without doing the permutation
4821 // I expect the caller to fail the test. It still makes sense to report this as
4822 // an assert for Debug builds.
4823 return;
4824 }
4825
telsoa01c577f2c2018-08-31 09:22:23 +01004826 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004827 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4828 std::vector<T *> inputs = inputsOrig;
4829 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4830
4831 armnn::PermutationVector permuteVector{0, 1, 2};
4832
telsoa01c577f2c2018-08-31 09:22:23 +01004833 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004834 std::vector<std::vector<T>> tmpInputDataStorage;
4835
4836 const size_t inputCount = inputTensorInfos.size();
4837
4838 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4839
4840 if (needPermuteForConcat)
4841 {
4842 //
4843 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004844 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004845 //
4846 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004847 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004848 inputTensorInfos,
4849 inputs,
4850 tmpInputDataStorage,
4851 permuteVector,
4852 concatDim,
4853 outputTensorInfo);
4854 }
4855
narpra015cdda352018-11-19 15:30:27 +00004856 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004857
4858 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4859 inputHandles.reserve(inputCount);
4860
narpra015cdda352018-11-19 15:30:27 +00004861 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4862
Jim Flynne242f2d2019-05-22 14:24:13 +01004863 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004864 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004865 queueDescriptor.m_Parameters = viewsDescriptor;
4866
4867 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004868 {
narpra015cdda352018-11-19 15:30:27 +00004869 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4870 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4871 {
4872 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4873 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4874 }
telsoa014fcda012018-03-09 14:13:49 +00004875
narpra015cdda352018-11-19 15:30:27 +00004876 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004877
narpra015cdda352018-11-19 15:30:27 +00004878 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4879 for (unsigned int i = 0; i < inputCount; ++i)
4880 {
4881 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4882 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4883 subTensorsSupported ?
4884 workloadFactory.CreateSubTensorHandle(*outputHandle,
4885 inputTensorInfo.GetShape(),
4886 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4887 workloadFactory.CreateTensorHandle(inputTensorInfo);
4888
4889 inputHandles.emplace_back(std::move(inputHandle));
4890 }
4891
telsoa014fcda012018-03-09 14:13:49 +00004892 }
narpra015cdda352018-11-19 15:30:27 +00004893 else
4894 {
4895 for (unsigned int i = 0; i < inputCount; ++i)
4896 {
4897 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4898 inputHandles.emplace_back(std::move(inputHandle));
4899 }
4900 }
telsoa014fcda012018-03-09 14:13:49 +00004901
4902 for (unsigned int i = 0; i < inputCount; ++i)
4903 {
surmeh013537c2c2018-05-18 16:31:43 +01004904 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004905 }
4906
4907 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4908
Jim Flynn4ed6c832019-05-20 11:02:46 +01004909 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004910
4911 for (auto& inputHandle : inputHandles)
4912 {
4913 inputHandle->Allocate();
4914 }
4915
4916 outputHandle->Allocate();
4917
4918 unsigned int nextInputId = 0;
4919 for (auto& inputHandle : inputHandles)
4920 {
surmeh013537c2c2018-05-18 16:31:43 +01004921 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4922 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004923 }
4924
Derek Lambertif30f7d32019-04-09 10:25:02 +01004925 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004926 workload->Execute();
4927
surmeh013537c2c2018-05-18 16:31:43 +01004928 if (needPermuteForConcat)
4929 {
4930 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004931 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004932 outputTensorInfo,
4933 permuteVector,
4934 std::move(outputHandle),
4935 output);
4936 }
4937 else
4938 {
4939 CopyDataFromITensorHandle(output, outputHandle.get());
4940 }
telsoa014fcda012018-03-09 14:13:49 +00004941}
4942
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004943template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004944LayerTestResult<T, 1> Concatenation1dTestImpl(
4945 armnn::IWorkloadFactory& workloadFactory,
4946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4947 float qScale,
4948 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004949{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004950 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004951
4952 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4953 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4954 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4955
Jim Flynncbb66aa2019-05-15 13:03:54 +01004956 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004957
4958 LayerTestResult<T, 1> result(outputTensorInfo);
4959
4960 std::vector<T> output;
4961 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004962 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004963 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4964 { input0.data(), input1.data(), input2.data() },
4965 outputTensorInfo,
4966 output.data(),
4967 0,
4968 true);
telsoa014fcda012018-03-09 14:13:49 +00004969
4970 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4971 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4972 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4973 }));
4974
4975 return result;
4976}
4977
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004978LayerTestResult<float, 1> Concatenation1dTest(
4979 armnn::IWorkloadFactory& workloadFactory,
4980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004981{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004982 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004983}
4984
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004985template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004986LayerTestResult<T, 2> Concatenation2dTestImpl(
4987 armnn::IWorkloadFactory& workloadFactory,
4988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004989 const armnn::TensorInfo& outputTensorInfo,
4990 unsigned int dimension,
4991 const float qScale,
4992 const int32_t qOffset)
4993{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004994 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004995
4996 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4997 // Batch 0
4998 1.0f, 2.0f, 3.0f,
4999
5000 // Batch 1
5001 10.0f, 11.0f, 12.0f,
5002 }));
5003
5004 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5005 // Batch 0
5006 4.0f, 5.0f, 6.0f,
5007
5008 // Batch 1
5009 13.0f, 14.0f, 15.0f,
5010 }));
5011
5012 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5013 // Batch 0
5014 7.0f, 8.0f, 9.0f,
5015
5016 // Batch 1
5017 16.0f, 17.0f, 18.0f,
5018 }));
5019
5020 LayerTestResult<T, 2> result(outputTensorInfo);
5021
5022 std::vector<T> output;
5023 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005024 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005025 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5026 { input0.data(), input1.data(), input2.data() },
5027 outputTensorInfo,
5028 output.data(),
5029 dimension,
5030 true);
telsoa014fcda012018-03-09 14:13:49 +00005031
5032 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5033 return result;
5034}
5035
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005036template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005037LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
5038 armnn::IWorkloadFactory& workloadFactory,
5039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5040 float qScale,
5041 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005042{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005043 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005044
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005045 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
5046 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
5047
telsoa014fcda012018-03-09 14:13:49 +00005048 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5049 // Batch 0
5050 1.0f, 2.0f, 3.0f,
5051
5052 // Batch 1
5053 10.0f, 11.0f, 12.0f,
5054
5055 // Batch 2
5056 4.0f, 5.0f, 6.0f,
5057
5058 // Batch 3
5059 13.0f, 14.0f, 15.0f,
5060
5061 // Batch 4
5062 7.0f, 8.0f, 9.0f,
5063
5064 // Batch 5
5065 16.0f, 17.0f, 18.0f,
5066 }));
5067
5068 return result;
5069}
5070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005071LayerTestResult<float, 2> Concatenation2dDim0Test(
5072 armnn::IWorkloadFactory& workloadFactory,
5073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005074{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005075 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005076}
5077
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005078template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005079LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
5080 armnn::IWorkloadFactory& workloadFactory,
5081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5082 float qScale,
5083 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005084{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005085 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005086
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005087 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
5088 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
5089
telsoa014fcda012018-03-09 14:13:49 +00005090 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5091 // Batch 0
5092 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5093
5094 // Batch 1
5095 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
5096 }));
5097
5098 return result;
5099}
5100
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005101LayerTestResult<float, 2> Concatenation2dDim1Test(
5102 armnn::IWorkloadFactory& workloadFactory,
5103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005104{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005105 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005106}
5107
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005108template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005109LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
5110 armnn::IWorkloadFactory& workloadFactory,
5111 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5112 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005113 int32_t qOffset)
5114{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005115 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005116 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5117 // Batch 0
5118 1.0f, 2.0f, 3.0f,
5119
5120 // Batch 1
5121 10.0f, 11.0f, 12.0f,
5122 }));
5123
Jim Flynncbb66aa2019-05-15 13:03:54 +01005124 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005125 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5126 // Batch 0
5127 4.0f, 5.0f, 6.0f,
5128
5129 // Batch 1
5130 13.0f, 14.0f, 15.0f,
5131
5132 // Batch 0
5133 7.0f, 8.0f, 9.0f,
5134 }));
5135
Jim Flynncbb66aa2019-05-15 13:03:54 +01005136 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005137 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5138 // Batch 1
5139 16.0f, 17.0f, 18.0f,
5140 }));
5141
Jim Flynncbb66aa2019-05-15 13:03:54 +01005142 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005143 LayerTestResult<T, 2> result(outputTensorInfo);
5144
5145 std::vector<T> output;
5146 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005147 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005148 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5149 { input0.data(), input1.data(), input2.data() },
5150 outputTensorInfo,
5151 output.data(),
5152 0,
5153 true);
telsoa014fcda012018-03-09 14:13:49 +00005154
5155 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5156 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5157 // Batch 0
5158 1.0f, 2.0f, 3.0f,
5159
5160 // Batch 1
5161 10.0f, 11.0f, 12.0f,
5162
5163 // Batch 2
5164 4.0f, 5.0f, 6.0f,
5165
5166 // Batch 3
5167 13.0f, 14.0f, 15.0f,
5168
5169 // Batch 4
5170 7.0f, 8.0f, 9.0f,
5171
5172 // Batch 5
5173 16.0f, 17.0f, 18.0f,
5174 }));
5175
5176 return result;
5177}
5178
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005179LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
5180 armnn::IWorkloadFactory& workloadFactory,
5181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005182{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005183 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5184 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005185}
5186
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005187template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005188LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
5189 armnn::IWorkloadFactory& workloadFactory,
5190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5191 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005192 int32_t qOffset)
5193{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005194 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005195 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5196 // Batch 0
5197 1.0f, 2.0f, 3.0f,
5198
5199 // Batch 1
5200 10.0f, 11.0f, 12.0f,
5201 }));
5202
Jim Flynncbb66aa2019-05-15 13:03:54 +01005203 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005204 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5205 // Batch 0
5206 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
5207
5208 // Batch 1
5209 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
5210 }));
5211
Jim Flynncbb66aa2019-05-15 13:03:54 +01005212 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005213 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5214 // Batch 0
5215 9.0f,
5216
5217 // Batch 1
5218 18.0f
5219 }));
5220
Jim Flynncbb66aa2019-05-15 13:03:54 +01005221 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005222 LayerTestResult<T, 2> result(outputTensorInfo);
5223
5224 std::vector<T> output;
5225 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005226 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005227 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5228 { input0.data(), input1.data(), input2.data() },
5229 outputTensorInfo,
5230 output.data(),
5231 1,
5232 true);
telsoa014fcda012018-03-09 14:13:49 +00005233
5234 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
5235 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5236 // Batch 0
5237 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
5238
5239 // Batch 1
5240 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
5241 }));
5242
5243 return result;
5244}
5245
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005246LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
5247 armnn::IWorkloadFactory& workloadFactory,
5248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005249{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005250 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5251 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005252}
5253
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005254template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005255LayerTestResult<T, 3> Concatenation3dTestImpl(
5256 armnn::IWorkloadFactory& workloadFactory,
5257 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00005258 const armnn::TensorInfo& outputTensorInfo,
5259 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00005260 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00005261 float qScale,
5262 int32_t qOffset)
5263{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005264 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005265
5266 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5267 // Batch 0, Channel 0
5268 1.0f, 2.0f,
5269
5270 // Batch 0, Channel 1
5271 3.0f, 4.0f,
5272
5273 // Batch 0, Channel 2
5274 5.0f, 6.0f,
5275
5276 // Batch 1, Channel 0
5277 19.0f, 20.0f,
5278
5279 // Batch 1, Channel 1
5280 21.0f, 22.0f,
5281
5282 // Batch 1, Channel 2
5283 23.0f, 24.0f
5284 }));
5285
5286 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5287 // Batch 0, Channel 0
5288 7.0f, 8.0f,
5289
5290 // Batch 0, Channel 1
5291 9.0f, 10.0f,
5292
5293 // Batch 0, Channel 2
5294 11.0f, 12.0f,
5295
5296 // Batch 1, Channel 0
5297 25.0f, 26.0f,
5298
5299 // Batch 1, Channel 1
5300 27.0f, 28.0f,
5301
5302 // Batch 1, Channel 2
5303 29.0f, 30.0f
5304 }));
5305
5306 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5307 // Batch 0, Channel 0
5308 13.0f, 14.0f,
5309
5310 // Batch 0, Channel 1
5311 15.0f, 16.0f,
5312
5313 // Batch 0, Channel 2
5314 17.0f, 18.0f,
5315
5316 // Batch 1, Channel 0
5317 31.0f, 32.0f,
5318
5319 // Batch 1, Channel 1
5320 33.0f, 34.0f,
5321
5322 // Batch 1, Channel 2
5323 35.0f, 36.0f
5324 }));
5325
5326 LayerTestResult<T, 3> result(outputTensorInfo);
5327
5328 std::vector<T> output;
5329 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005330 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005331 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
5332 { input0.data(), input1.data(), input2.data() },
5333 outputTensorInfo,
5334 output.data(),
5335 dimension,
5336 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005337
5338 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5339 return result;
5340}
5341
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005342template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005343LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
5344 armnn::IWorkloadFactory& workloadFactory,
5345 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5346 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005347 int32_t qOffset)
5348{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005349 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005350
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005351 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5352 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
5353
telsoa014fcda012018-03-09 14:13:49 +00005354 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5355 // Batch 0, Channel 0
5356 1.0f, 2.0f,
5357
5358 // Batch 0, Channel 1
5359 3.0f, 4.0f,
5360
5361 // Batch 0, Channel 2
5362 5.0f, 6.0f,
5363
5364 // Batch 1, Channel 0
5365 19.0f, 20.0f,
5366
5367 // Batch 1, Channel 1
5368 21.0f, 22.0f,
5369
5370 // Batch 1, Channel 2
5371 23.0f, 24.0f,
5372
5373 // Batch 2, Channel 0
5374 7.0f, 8.0f,
5375
5376 // Batch 2, Channel 1
5377 9.0f, 10.0f,
5378
5379 // Batch 2, Channel 2
5380 11.0f, 12.0f,
5381
5382 // Batch 3, Channel 0
5383 25.0f, 26.0f,
5384
5385 // Batch 3, Channel 1
5386 27.0f, 28.0f,
5387
5388 // Batch 3, Channel 2
5389 29.0f, 30.0f,
5390
5391 // Batch 4, Channel 0
5392 13.0f, 14.0f,
5393
5394 // Batch 4, Channel 1
5395 15.0f, 16.0f,
5396
5397 // Batch 4, Channel 2
5398 17.0f, 18.0f,
5399
5400 // Batch 5, Channel 0
5401 31.0f, 32.0f,
5402
5403 // Batch 5, Channel 1
5404 33.0f, 34.0f,
5405
5406 // Batch 5, Channel 2
5407 35.0f, 36.0f
5408 }));
narpra015cdda352018-11-19 15:30:27 +00005409
telsoa014fcda012018-03-09 14:13:49 +00005410 return result;
5411}
5412
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005413LayerTestResult<float, 3> Concatenation3dDim0Test(
5414 armnn::IWorkloadFactory& workloadFactory,
5415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005416{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005417 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005418}
5419
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005420template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005421LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
5422 armnn::IWorkloadFactory& workloadFactory,
5423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5424 float qScale,
5425 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005426{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005427 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005428
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005429 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5430 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005431
telsoa014fcda012018-03-09 14:13:49 +00005432 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5433 // Batch 0, Channel 0
5434 1.0f, 2.0f,
5435
5436 // Batch 0, Channel 1
5437 3.0f, 4.0f,
5438
5439 // Batch 0, Channel 2
5440 5.0f, 6.0f,
5441
5442 // Batch 0, Channel 3
5443 7.0f, 8.0f,
5444
5445 // Batch 0, Channel 4
5446 9.0f, 10.0f,
5447
5448 // Batch 0, Channel 5
5449 11.0f, 12.0f,
5450
5451 // Batch 0, Channel 6
5452 13.0f, 14.0f,
5453
5454 // Batch 0, Channel 7
5455 15.0f, 16.0f,
5456
5457 // Batch 0, Channel 8
5458 17.0f, 18.0f,
5459
5460 // Batch 1, Channel 0
5461 19.0f, 20.0f,
5462
5463 // Batch 1, Channel 1
5464 21.0f, 22.0f,
5465
5466 // Batch 1, Channel 2
5467 23.0f, 24.0f,
5468
5469 // Batch 1, Channel 3
5470 25.0f, 26.0f,
5471
5472 // Batch 1, Channel 4
5473 27.0f, 28.0f,
5474
5475 // Batch 1, Channel 5
5476 29.0f, 30.0f,
5477
5478 // Batch 1, Channel 6
5479 31.0f, 32.0f,
5480
5481 // Batch 1, Channel 7
5482 33.0f, 34.0f,
5483
5484 // Batch 1, Channel 8
5485 35.0f, 36.0f
5486 }));
5487
5488 return result;
5489}
5490
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005491LayerTestResult<float, 3> Concatenation3dDim1Test(
5492 armnn::IWorkloadFactory& workloadFactory,
5493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005494{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005495 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005496}
5497
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005498template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005499LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5500 armnn::IWorkloadFactory& workloadFactory,
5501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005502 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005503 float qScale,
5504 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005505{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005506 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005507
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005508 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5509 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005510
telsoa014fcda012018-03-09 14:13:49 +00005511 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5512 // Batch 0, Channel 0
5513 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5514
5515 // Batch 0, Channel 1
5516 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5517
5518 // Batch 0, Channel 2
5519 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5520
5521 // Batch 1, Channel 0
5522 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5523
5524 // Batch 1, Channel 1
5525 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5526
5527 // Batch 1, Channel 2
5528 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5529 }));
5530
5531 return result;
5532}
5533
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005534LayerTestResult<float, 3> Concatenation3dDim2Test(
5535 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5537 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005538{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005539 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5540 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005541}
5542
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005543template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005544LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5545 armnn::IWorkloadFactory& workloadFactory,
5546 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5547 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005548 int32_t qOffset)
5549{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005550 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005551 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5552 // Batch 0, Channel 0
5553 1.0f, 2.0f,
5554
5555 // Batch 0, Channel 1
5556 3.0f, 4.0f,
5557
5558 // Batch 0, Channel 2
5559 5.0f, 6.0f,
5560
5561 // Batch 1, Channel 0
5562 19.0f, 20.0f,
5563
5564 // Batch 1, Channel 1
5565 21.0f, 22.0f,
5566
5567 // Batch 1, Channel 2
5568 23.0f, 24.0f
5569 }));
5570
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005571 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005572 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5573 // Batch 0, Channel 0
5574 7.0f, 8.0f,
5575
5576 // Batch 0, Channel 1
5577 9.0f, 10.0f,
5578
5579 // Batch 0, Channel 2
5580 11.0f, 12.0f,
5581 }));
5582
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005583 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005584 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5585 // Batch 0, Channel 0
5586 25.0f, 26.0f,
5587
5588 // Batch 0, Channel 1
5589 27.0f, 28.0f,
5590
5591 // Batch 0, Channel 2
5592 29.0f, 30.0f,
5593
5594 // Batch 1, Channel 0
5595 13.0f, 14.0f,
5596
5597 // Batch 1, Channel 1
5598 15.0f, 16.0f,
5599
5600 // Batch 1, Channel 2
5601 17.0f, 18.0f,
5602
5603 // Batch 2, Channel 0
5604 31.0f, 32.0f,
5605
5606 // Batch 2, Channel 1
5607 33.0f, 34.0f,
5608
5609 // Batch 2, Channel 2
5610 35.0f, 36.0f
5611 }));
5612
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005613 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005614 LayerTestResult<T, 3> result(outputTensorInfo);
5615
5616 std::vector<T> output;
5617 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005618 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005619 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5620 { input0.data(), input1.data(), input2.data() },
5621 outputTensorInfo,
5622 output.data(),
5623 0,
5624 true);
telsoa014fcda012018-03-09 14:13:49 +00005625
5626 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5627 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5628 // Batch 0, Channel 0
5629 1.0f, 2.0f,
5630
5631 // Batch 0, Channel 1
5632 3.0f, 4.0f,
5633
5634 // Batch 0, Channel 2
5635 5.0f, 6.0f,
5636
5637 // Batch 1, Channel 0
5638 19.0f, 20.0f,
5639
5640 // Batch 1, Channel 1
5641 21.0f, 22.0f,
5642
5643 // Batch 1, Channel 2
5644 23.0f, 24.0f,
5645
5646 // Batch 2, Channel 0
5647 7.0f, 8.0f,
5648
5649 // Batch 2, Channel 1
5650 9.0f, 10.0f,
5651
5652 // Batch 2, Channel 2
5653 11.0f, 12.0f,
5654
5655 // Batch 3, Channel 0
5656 25.0f, 26.0f,
5657
5658 // Batch 3, Channel 1
5659 27.0f, 28.0f,
5660
5661 // Batch 3, Channel 2
5662 29.0f, 30.0f,
5663
5664 // Batch 4, Channel 0
5665 13.0f, 14.0f,
5666
5667 // Batch 4, Channel 1
5668 15.0f, 16.0f,
5669
5670 // Batch 4, Channel 2
5671 17.0f, 18.0f,
5672
5673 // Batch 5, Channel 0
5674 31.0f, 32.0f,
5675
5676 // Batch 5, Channel 1
5677 33.0f, 34.0f,
5678
5679 // Batch 5, Channel 2
5680 35.0f, 36.0f
5681 }));
5682
5683 return result;
5684}
5685
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005686LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5687 armnn::IWorkloadFactory& workloadFactory,
5688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005689{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005690 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5691 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005692}
5693
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005694template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005695LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5696 armnn::IWorkloadFactory& workloadFactory,
5697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5698 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005699 int32_t qOffset)
5700{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005701 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005702 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5703 // Batch 0, Channel 0
5704 1.0f, 2.0f,
5705
5706 // Batch 0, Channel 1
5707 3.0f, 4.0f,
5708
5709 // Batch 0, Channel 2
5710 5.0f, 6.0f,
5711
5712 // Batch 1, Channel 0
5713 19.0f, 20.0f,
5714
5715 // Batch 1, Channel 1
5716 21.0f, 22.0f,
5717
5718 // Batch 1, Channel 2
5719 23.0f, 24.0f
5720 }));
5721
Jim Flynncbb66aa2019-05-15 13:03:54 +01005722 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005723 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5724 // Batch 0, Channel 0
5725 7.0f, 8.0f,
5726
5727 // Batch 0, Channel 1
5728 9.0f, 10.0f,
5729
5730 // Batch 0, Channel 2
5731 11.0f, 12.0f,
5732
5733 // Batch 0, Channel 3
5734 25.0f, 26.0f,
5735
5736 // Batch 1, Channel 0
5737 27.0f, 28.0f,
5738
5739 // Batch 1, Channel 1
5740 29.0f, 30.0f,
5741
5742 // Batch 1, Channel 2
5743 13.0f, 14.0f,
5744
5745 // Batch 1, Channel 3
5746 15.0f, 16.0f,
5747 }));
5748
Jim Flynncbb66aa2019-05-15 13:03:54 +01005749 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005750 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5751 // Batch 0, Channel 0
5752 17.0f, 18.0f,
5753
5754 // Batch 1, Channel 0
5755 31.0f, 32.0f,
5756 }));
5757
Jim Flynncbb66aa2019-05-15 13:03:54 +01005758 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005759 LayerTestResult<T, 3> result(outputTensorInfo);
5760
5761 std::vector<T> output;
5762 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005763 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005764 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5765 { input0.data(), input1.data(), input2.data() },
5766 outputTensorInfo,
5767 output.data(),
5768 1,
5769 true);
telsoa014fcda012018-03-09 14:13:49 +00005770
5771 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5772 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5773 // Batch 0, Channel 0
5774 1.0f, 2.0f,
5775
5776 // Batch 0, Channel 1
5777 3.0f, 4.0f,
5778
5779 // Batch 0, Channel 2
5780 5.0f, 6.0f,
5781
5782 // Batch 0, Channel 3
5783 7.0f, 8.0f,
5784
5785 // Batch 0, Channel 4
5786 9.0f, 10.0f,
5787
5788 // Batch 0, Channel 5
5789 11.0f, 12.0f,
5790
5791 // Batch 0, Channel 6
5792 25.0f, 26.0f,
5793
5794 // Batch 0, Channel 7
5795 17.0f, 18.0f,
5796
5797 // Batch 1, Channel 0
5798 19.0f, 20.0f,
5799
5800 // Batch 1, Channel 1
5801 21.0f, 22.0f,
5802
5803 // Batch 1, Channel 2
5804 23.0f, 24.0f,
5805
5806 // Batch 1, Channel 3
5807 27.0f, 28.0f,
5808
5809 // Batch 1, Channel 4
5810 29.0f, 30.0f,
5811
5812 // Batch 1, Channel 5
5813 13.0f, 14.0f,
5814
5815 // Batch 1, Channel 6
5816 15.0f, 16.0f,
5817
5818 // Batch 1, Channel 7
5819 31.0f, 32.0f,
5820 }));
5821
5822 return result;
5823}
5824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005825LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5826 armnn::IWorkloadFactory& workloadFactory,
5827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005828{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005829 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5830 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005831}
5832
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005833template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005834LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
5835 armnn::IWorkloadFactory& workloadFactory,
5836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005837 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005838 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005839 int32_t qOffset)
5840{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005841 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005842 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5843 // Batch 0, Channel 0
5844 1.0f, 2.0f,
5845
5846 // Batch 0, Channel 1
5847 3.0f, 4.0f,
5848
5849 // Batch 0, Channel 2
5850 5.0f, 6.0f,
5851
5852 // Batch 1, Channel 0
5853 19.0f, 20.0f,
5854
5855 // Batch 1, Channel 1
5856 21.0f, 22.0f,
5857
5858 // Batch 1, Channel 2
5859 23.0f, 24.0f
5860 }));
5861
Jim Flynncbb66aa2019-05-15 13:03:54 +01005862 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005863 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5864 // Batch 0, Channel 0
5865 7.0f,
5866
5867 // Batch 0, Channel 1
5868 9.0f,
5869
5870 // Batch 0, Channel 2
5871 11.0f,
5872
5873 // Batch 1, Channel 0
5874 25.0f,
5875
5876 // Batch 1, Channel 1
5877 27.0f,
5878
5879 // Batch 1, Channel 2
5880 29.0f
5881 }));
5882
Jim Flynncbb66aa2019-05-15 13:03:54 +01005883 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005884 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5885 // Batch 0, Channel 0
5886 13.0f, 14.0f, 50.0f,
5887
5888 // Batch 0, Channel 1
5889 15.0f, 16.0f, 51.0f,
5890
5891 // Batch 0, Channel 2
5892 17.0f, 18.0f, 52.0f,
5893
5894 // Batch 1, Channel 0
5895 31.0f, 32.0f, 53.0f,
5896
5897 // Batch 1, Channel 1
5898 33.0f, 34.0f, 54.0f,
5899
5900 // Batch 1, Channel 2
5901 35.0f, 36.0f, 55.0f,
5902 }));
5903
Jim Flynncbb66aa2019-05-15 13:03:54 +01005904 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005905 LayerTestResult<T, 3> result(outputTensorInfo);
5906
5907 std::vector<T> output;
5908 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005909 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005910 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5911 { input0.data(), input1.data(), input2.data() },
5912 outputTensorInfo,
5913 output.data(),
5914 2,
5915 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005916
5917 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5918 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5919 // Batch 0, Channel 0
5920 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
5921
5922 // Batch 0, Channel 1
5923 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
5924
5925 // Batch 0, Channel 2
5926 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
5927
5928 // Batch 1, Channel 0
5929 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
5930
5931 // Batch 1, Channel 1
5932 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
5933
5934 // Batch 1, Channel 2
5935 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
5936 }));
5937
5938 return result;
5939}
5940
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005941LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5942 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5944 bool useSubtensor)
5945{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005946 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5947 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005948}
5949
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005950template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005951LayerTestResult<T, 4> Concatenation4dTestImpl(
5952 armnn::IWorkloadFactory& workloadFactory,
5953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5954 const armnn::TensorInfo& outputTensorInfo,
5955 unsigned int dimension,
5956 bool useSubtensor,
5957 float qScale,
5958 int32_t qOffset)
5959{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005960 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005961
5962 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5963 1.0f, 2.0f,
5964 3.0f, 4.0f,
5965 5.0f, 6.0f,
5966 7.0f, 8.0f,
5967 9.0f, 10.0f,
5968 11.0f, 12.0f
5969 }));
5970
5971 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5972 11.0f, 12.0f,
5973 13.0f, 14.0f,
5974 15.0f, 16.0f,
5975 17.0f, 18.0f,
5976 19.0f, 20.0f,
5977 21.0f, 22.0f
5978 }));
5979
5980 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5981 21.0f, 22.0f,
5982 23.0f, 24.0f,
5983 25.0f, 26.0f,
5984 27.0f, 28.0f,
5985 29.0f, 30.0f,
5986 31.0f, 32.0f
5987 }));
5988
5989 LayerTestResult<T, 4> result(outputTensorInfo);
5990
5991 std::vector<T> output;
5992 output.resize(outputTensorInfo.GetNumElements());
5993
5994 Concatenate<T>(workloadFactory,
5995 memoryManager,
5996 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5997 {input0.data(), input1.data(), input2.data()},
5998 outputTensorInfo,
5999 output.data(),
6000 dimension,
6001 useSubtensor);
6002
6003 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
6004 return result;
6005}
6006
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006007template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006008LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
6009 armnn::IWorkloadFactory& workloadFactory,
6010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6011 float qScale,
6012 int32_t qOffset)
6013{
Jim Flynncbb66aa2019-05-15 13:03:54 +01006014 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006015
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006016 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6017 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
6018
narpra015cdda352018-11-19 15:30:27 +00006019 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6020 1.0f, 2.0f,
6021 3.0f, 4.0f,
6022 5.0f, 6.0f,
6023 7.0f, 8.0f,
6024 9.0f, 10.0f,
6025 11.0f, 12.0f,
6026
6027 11.0f, 12.0f,
6028 13.0f, 14.0f,
6029 15.0f, 16.0f,
6030 17.0f, 18.0f,
6031 19.0f, 20.0f,
6032 21.0f, 22.0f,
6033
6034 21.0f, 22.0f,
6035 23.0f, 24.0f,
6036 25.0f, 26.0f,
6037 27.0f, 28.0f,
6038 29.0f, 30.0f,
6039 31.0f, 32.0f
6040 }));
6041 return result;
6042}
6043
6044LayerTestResult<float, 4> Concatenation4dDim0Test(
6045 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006047{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006048 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006049}
6050
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006051template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006052LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
6053 armnn::IWorkloadFactory& workloadFactory,
6054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6055 float qScale,
6056 int32_t qOffset)
6057{
Jim Flynncbb66aa2019-05-15 13:03:54 +01006058 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006059
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006060 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6061 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
6062
narpra015cdda352018-11-19 15:30:27 +00006063 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6064 1.0f, 2.0f,
6065 3.0f, 4.0f,
6066 5.0f, 6.0f,
6067 7.0f, 8.0f,
6068 9.0f, 10.0f,
6069 11.0f, 12.0f,
6070
6071 11.0f, 12.0f,
6072 13.0f, 14.0f,
6073 15.0f, 16.0f,
6074 17.0f, 18.0f,
6075 19.0f, 20.0f,
6076 21.0f, 22.0f,
6077
6078 21.0f, 22.0f,
6079 23.0f, 24.0f,
6080 25.0f, 26.0f,
6081 27.0f, 28.0f,
6082 29.0f, 30.0f,
6083 31.0f, 32.0f
6084 }));
6085
6086 return result;
6087}
6088
6089LayerTestResult<float, 4> Concatenation4dDim1Test(
6090 armnn::IWorkloadFactory& workloadFactory,
6091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6092{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006093 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006094}
6095
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006096template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006097LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
6098 armnn::IWorkloadFactory& workloadFactory,
6099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6100 float qScale,
6101 int32_t qOffset)
6102{
Jim Flynncbb66aa2019-05-15 13:03:54 +01006103 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006104
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006105 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6106 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
6107
narpra015cdda352018-11-19 15:30:27 +00006108 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6109 1.0f, 2.0f,
6110 3.0f, 4.0f,
6111 11.0f, 12.0f,
6112 13.0f, 14.0f,
6113 21.0f, 22.0f,
6114 23.0f, 24.0f,
6115
6116 5.0f, 6.0f,
6117 7.0f, 8.0f,
6118 15.0f, 16.0f,
6119 17.0f, 18.0f,
6120 25.0f, 26.0f,
6121 27.0f, 28.0f,
6122
6123 9.0f, 10.0f,
6124 11.0f, 12.0f,
6125 19.0f, 20.0f,
6126 21.0f, 22.0f,
6127 29.0f, 30.0f,
6128 31.0f, 32.0f
6129 }));
6130
6131 return result;
6132}
6133
6134LayerTestResult<float, 4> Concatenation4dDim2Test(
6135 armnn::IWorkloadFactory& workloadFactory,
6136 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6137{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006138 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006139}
6140
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006141template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00006142LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
6143 armnn::IWorkloadFactory& workloadFactory,
6144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6145 float qScale,
6146 int32_t qOffset,
6147 bool useSubtensor)
6148{
Jim Flynncbb66aa2019-05-15 13:03:54 +01006149 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00006150
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006151 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
6152 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
6153
narpra015cdda352018-11-19 15:30:27 +00006154 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
6155 1.0f, 2.0f,
6156 11.0f, 12.0f,
6157 21.0f, 22.0f,
6158 3.0f, 4.0f,
6159 13.0f, 14.0f,
6160 23.0f, 24.0f,
6161
6162 5.0f, 6.0f,
6163 15.0f, 16.0f,
6164 25.0f, 26.0f,
6165 7.0f, 8.0f,
6166 17.0f, 18.0f,
6167 27.0f, 28.0f,
6168
6169 9.0f, 10.0f,
6170 19.0f, 20.0f,
6171 29.0f, 30.0f,
6172 11.0f, 12.0f,
6173 21.0f, 22.0f,
6174 31.0f, 32.0f
6175 }));
6176
6177 return result;
6178}
6179
6180LayerTestResult<float, 4> Concatenation4dDim3Test(
6181 armnn::IWorkloadFactory& workloadFactory,
6182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6183 bool useSubtensor)
6184{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006185 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
6186 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00006187}
6188
// Concatenation along dimension 0 (batch) of two 4-d tensors with different
// batch sizes: 1x3x2x2 + 2x3x2x2 -> 3x3x2x2. Runs the Concatenate helper
// directly and checks the output is input0 followed by input1.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input deliberately has a different extent (2) on the concat axis.
    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Last argument (useSubtensor) is hard-coded to true for this test.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: the two inputs stacked back-to-back along the batch axis.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
6268
6269LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
6270 armnn::IWorkloadFactory& workloadFactory,
6271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6272{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006273 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
6274 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006275}
6276
// Concatenation along dimension 1 (channels) of two 4-d tensors with different
// channel counts: 1x3x2x2 + 1x2x2x2 -> 1x5x2x2.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has only 2 channels on the concat axis.
    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // useSubtensor is hard-coded to true for this test.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: input0's 3 channels followed by input1's 2 channels.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
6337
6338LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
6339 armnn::IWorkloadFactory& workloadFactory,
6340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6341{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006342 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
6343 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006344}
6345
// Concatenation along dimension 2 (height) of two 4-d tensors with different
// heights: 1x3x2x2 + 1x3x3x2 -> 1x3x5x2.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has height 3 on the concat axis.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // useSubtensor is hard-coded to true for this test.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: per channel, input0's 2 rows followed by input1's 3 rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
6417
6418LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
6419 armnn::IWorkloadFactory& workloadFactory,
6420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6421{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006422 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
6423 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00006424}
6425
// Concatenation along dimension 3 (width) of two 4-d tensors with different
// widths: 1x3x2x2 + 1x3x2x3 -> 1x3x2x5. Unlike the other DiffShape variants,
// useSubtensor is caller-controlled here.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has width 3 on the concat axis.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected: each output row is input0's 2 elements followed by input1's 3.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
6486
6487LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6488 armnn::IWorkloadFactory& workloadFactory,
6489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6490 bool useSubtensor)
6491{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006492 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6493 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006494}
6495
// Runs a FakeQuantization workload over a 3x2 Float32 tensor with the
// quantization range [-10, 10] and checks the values are mapped onto the
// 8-bit range [0, 255] (still represented as floats).
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({height, width },
        armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f,  -5.0f,
         0.0f,   5.0f,
        10.0f,  10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    // NOTE(review): refHandle/refData/refInfo alias ret.outputExpected as a
    // reference output, but no workload is ever created from refData in this
    // function -- this setup appears vestigial; confirm before removing.
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    // Expected: [-10,10] linearly mapped to [0,255]; written after Execute()
    // because ret.outputExpected doubles as the refHandle buffer above.
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f,     63.0f,
        128.0f,   191.0f,
        255.0f,   255.0f
    }));
    return ret;
}
6552
namespace
{
// Shared driver for the L2Normalization layer tests.
//
// inputValues/expectedOutputValues are always supplied in NCHW order; when
// 'layout' is NHWC they are permuted (via the NCHWToNHWC vector below) before
// use. Input and output may carry independent quantization parameters
// (scale/offset vs outScale/outOffset). 'epsilon' is forwarded to the
// workload descriptor's m_Eps (default 1e-12f).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) reference input using the input info's
    // own scale/offset.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                                         inputTensorInfo.GetQuantizationScale(),
                                                         inputTensorInfo.GetQuantizationOffset(),
                                                         inputData));

    // Apply the same layout permutation to the expected output.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                                               outputTensorInfo.GetQuantizationScale(),
                                                               outputTensorInfo.GetQuantizationOffset(),
                                                               expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / sqrt(sum of squares) of the given elements -- the scaling
// factor applied by L2 normalization.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
6636
// Pads a 3x3 input to a 7x7 output: two elements of padding on every side of
// both dimensions, filled with customPaddingValue.
//
// qScale/qOffset quantize both input and expected values; customPaddingValue
// is the fill value (in float, pre-quantization).
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
    QuantizedVector<T>(qScale, qOffset,
    {
      // Height (3) x Width (3)
      4, 8, 6,
      7, 4, 4,
      3, 2, 4
    }));

    // The expected border is the quantized pad value; the fill value itself is
    // run through QuantizedVector along with the payload.
    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues;
    expectedOutputValues = (
    QuantizedVector<T>(qScale, qOffset,
    {
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p,
      p, p, 4, 8, 6, p, p,
      p, p, 7, 4, 4, p, p,
      p, p, 3, 2, 4, p, p,
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p
    }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: 2 on each side of height & width.
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    // NOTE(review): m_PadValue is set to the raw float while the expected
    // border is quantized -- presumably the backend quantizes the pad value
    // itself for quantized types; confirm against the Pad workload.
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006709
// Pads a 2x2x2 input to a 3x5x6 output with zeros, using asymmetric padding:
// (0,1) on dim 0, (2,1) on dim 1, (2,2) on dim 2.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // Zero-filled except the embedded 2x2 blocks; dim-0 padding (0,1) means
    // the input channels come first and a fully-zero channel is appended.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    // m_PadValue is left at its default here, so the border is zero-filled.
    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006794
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006795template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006796LayerTestResult<T, 4> Pad4dTestCommon(
6797 armnn::IWorkloadFactory& workloadFactory,
6798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6799 float qScale,
6800 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006801{
6802 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6803 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6804
David Monahan34757812019-06-19 11:47:21 +01006805 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6806 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006807
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006808 std::vector<T> inputValues(
6809 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006810 {
6811 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006812 0, 1,
6813 2, 3,
6814 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006815
6816 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006817 6, 7,
6818 8, 9,
6819 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006820
6821 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006822 12, 13,
6823 14, 15,
6824 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006825
6826 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006827 18, 19,
6828 20, 21,
6829 22, 23
6830 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006831
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006832 std::vector<T> expectedOutputValues(
6833 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006834 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006835 0, 0, 0, 0,
6836 0, 0, 0, 0,
6837 0, 0, 0, 0,
6838 0, 0, 0, 0,
6839 0, 0, 0, 0,
6840 0, 0, 0, 0,
6841 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006842
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006843 0, 0, 0, 0,
6844 0, 0, 0, 0,
6845 0, 0, 0, 0,
6846 0, 0, 0, 0,
6847 0, 0, 0, 0,
6848 0, 0, 0, 0,
6849 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006850
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006851 0, 0, 0, 0,
6852 0, 0, 0, 0,
6853 0, 0, 0, 0,
6854 0, 0, 0, 0,
6855 0, 0, 0, 0,
6856 0, 0, 0, 0,
6857 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006858
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006859 0, 0, 0, 0,
6860 0, 0, 0, 0,
6861 0, 0, 0, 0,
6862 0, 0, 0, 0,
6863 0, 0, 0, 0,
6864 0, 0, 0, 0,
6865 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006866
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006867 0, 0, 0, 0,
6868 0, 0, 0, 0,
6869 0, 0, 0, 0,
6870 0, 0, 0, 0,
6871 0, 0, 0, 0,
6872 0, 0, 0, 0,
6873 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006874
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006875 0, 0, 0, 0,
6876 0, 0, 0, 0,
6877 0, 0, 0, 0,
6878 0, 0, 0, 0,
6879 0, 0, 0, 0,
6880 0, 0, 0, 0,
6881 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006882
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006883 0, 0, 0, 0,
6884 0, 0, 0, 0,
6885 0, 0, 0, 0,
6886 0, 0, 0, 0,
6887 0, 0, 0, 0,
6888 0, 0, 0, 0,
6889 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006890
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006891 0, 0, 0, 0,
6892 0, 0, 0, 0,
6893 0, 0, 0, 0,
6894 0, 0, 1, 0,
6895 0, 2, 3, 0,
6896 0, 4, 5, 0,
6897 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006898
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006899 0, 0, 0, 0,
6900 0, 0, 0, 0,
6901 0, 0, 0, 0,
6902 0, 6, 7, 0,
6903 0, 8, 9, 0,
6904 0, 10, 11, 0,
6905 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006906
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006907 0, 0, 0, 0,
6908 0, 0, 0, 0,
6909 0, 0, 0, 0,
6910 0, 0, 0, 0,
6911 0, 0, 0, 0,
6912 0, 0, 0, 0,
6913 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006914
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006915 0, 0, 0, 0,
6916 0, 0, 0, 0,
6917 0, 0, 0, 0,
6918 0, 0, 0, 0,
6919 0, 0, 0, 0,
6920 0, 0, 0, 0,
6921 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006922
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006923 0, 0, 0, 0,
6924 0, 0, 0, 0,
6925 0, 0, 0, 0,
6926 0, 0, 0, 0,
6927 0, 0, 0, 0,
6928 0, 0, 0, 0,
6929 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006930
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006931 0, 0, 0, 0,
6932 0, 0, 0, 0,
6933 0, 0, 0, 0,
6934 0, 12, 13, 0,
6935 0, 14, 15, 0,
6936 0, 16, 17, 0,
6937 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006938
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006939 0, 0, 0, 0,
6940 0, 0, 0, 0,
6941 0, 0, 0, 0,
6942 0, 18, 19, 0,
6943 0, 20, 21, 0,
6944 0, 22, 23, 0,
6945 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006946
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006947 0, 0, 0, 0,
6948 0, 0, 0, 0,
6949 0, 0, 0, 0,
6950 0, 0, 0, 0,
6951 0, 0, 0, 0,
6952 0, 0, 0, 0,
6953 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006954
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006955 0, 0, 0, 0,
6956 0, 0, 0, 0,
6957 0, 0, 0, 0,
6958 0, 0, 0, 0,
6959 0, 0, 0, 0,
6960 0, 0, 0, 0,
6961 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006962
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006963 0, 0, 0, 0,
6964 0, 0, 0, 0,
6965 0, 0, 0, 0,
6966 0, 0, 0, 0,
6967 0, 0, 0, 0,
6968 0, 0, 0, 0,
6969 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006970
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006971 0, 0, 0, 0,
6972 0, 0, 0, 0,
6973 0, 0, 0, 0,
6974 0, 0, 0, 0,
6975 0, 0, 0, 0,
6976 0, 0, 0, 0,
6977 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006978
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006979 0, 0, 0, 0,
6980 0, 0, 0, 0,
6981 0, 0, 0, 0,
6982 0, 0, 0, 0,
6983 0, 0, 0, 0,
6984 0, 0, 0, 0,
6985 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006986
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006987 0, 0, 0, 0,
6988 0, 0, 0, 0,
6989 0, 0, 0, 0,
6990 0, 0, 0, 0,
6991 0, 0, 0, 0,
6992 0, 0, 0, 0,
6993 0, 0, 0, 0
6994 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006995
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006996 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006997
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006998 LayerTestResult<T, 4> result(outputTensorInfo);
6999 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01007000
7001 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7002 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7003
7004 armnn::PadQueueDescriptor descriptor;
7005
7006 std::vector<std::pair<unsigned int, unsigned int>> PadList;
7007 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
7008 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
7009 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
7010 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
7011
7012 descriptor.m_Parameters.m_PadList = PadList;
7013 armnn::WorkloadInfo info;
7014
7015 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7016 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7017
7018 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
7019
7020 inputHandle->Allocate();
7021 outputHandle->Allocate();
7022
7023 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
7024
Derek Lambertif30f7d32019-04-09 10:25:02 +01007025 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01007026 workload->Execute();
7027
7028 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7029
7030 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007031}
7032
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007033LayerTestResult<uint8_t, 2> PadUint82dTest(
7034 armnn::IWorkloadFactory& workloadFactory,
7035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007036{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007037 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007038}
7039
David Monahan34757812019-06-19 11:47:21 +01007040LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
7041 armnn::IWorkloadFactory& workloadFactory,
7042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7043{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01007044 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01007045}
7046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007047LayerTestResult<uint8_t, 3> PadUint83dTest(
7048 armnn::IWorkloadFactory& workloadFactory,
7049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007050{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007051 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007052}
7053
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007054LayerTestResult<uint8_t, 4> PadUint84dTest(
7055 armnn::IWorkloadFactory& workloadFactory,
7056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007057{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007058 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007059}
7060
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01007061
// Explicit instantiations of the Pad test helper templates for the
// QuantisedSymm16 data type, so other translation units that declare the
// int16 pad tests can link against these definitions.
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);
7083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007084LayerTestResult<float, 2> PadFloat322dTest(
7085 armnn::IWorkloadFactory& workloadFactory,
7086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007087{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007088 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007089}
7090
David Monahan34757812019-06-19 11:47:21 +01007091LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
7092 armnn::IWorkloadFactory& workloadFactory,
7093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7094{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01007095 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01007096}
7097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007098LayerTestResult<float, 3> PadFloat323dTest(
7099 armnn::IWorkloadFactory& workloadFactory,
7100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007101{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007102 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007103}
7104
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007105LayerTestResult<float, 4> PadFloat324dTest(
7106 armnn::IWorkloadFactory& workloadFactory,
7107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007108{
Teresa Charlinec8e1982019-07-02 16:24:09 +01007109 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01007110}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01007111
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007112template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01007113LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
7114 armnn::IWorkloadFactory& workloadFactory,
7115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7116 float scale,
7117 int32_t offset,
7118 float outScale,
7119 int32_t outOffset,
7120 const armnn::DataLayout layout,
7121 float epsilon)
7122{
7123 // Width: 1
7124 // Height: 1
7125 // Channels: 3
7126 // BatchSize: 1
7127 unsigned int numberOfBatches = 1;
7128 unsigned int numberOfChannels = 3;
7129 unsigned int height = 1;
7130 unsigned int width = 1;
7131
7132 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
7133 numberOfBatches, numberOfChannels, height, width, layout);
7134
7135 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
7136 std::vector<float> inputValues
7137 {
7138 // Batch 0, Channel 0, Height (1) x Width (1)
7139 0.00000001f,
7140
7141 // Batch 0, Channel 1, Height (1) x Width (1)
7142 0.00000002f,
7143
7144 // Batch 0, Channel 2, Height (1) x Width (1)
7145 0.00000003f,
7146 };
7147
7148 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
7149 std::vector<float> expectedOutputValues
7150 {
7151 // Batch 0, Channel 0, Height (1) x Width (1)
7152 0.00000001f * approxInvL2Norm,
7153 0.00000002f * approxInvL2Norm,
7154 0.00000003f * approxInvL2Norm,
7155 };
7156
7157 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7158 inputValues, outScale, outOffset, expectedOutputValues, layout,
7159 epsilon);
7160}
7161
7162
7163template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007164LayerTestResult<T, 4> L2Normalization1dTestCommon(
7165 armnn::IWorkloadFactory& workloadFactory,
7166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007167 float scale,
7168 int32_t offset,
7169 float outScale,
7170 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007171 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007172{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007173 // Width: 1
7174 // Height: 1
7175 // Channels: 10
7176 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007177 unsigned int numberOfBatches = 1;
7178 unsigned int numberOfChannels = 10;
7179 unsigned int height = 1;
7180 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00007181
jimfly013aab7c32018-11-12 13:32:08 +00007182
Nina Drozdd41b2592018-11-19 13:03:36 +00007183 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007184 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007185 std::vector<float> inputValues
7186 {
7187 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007188 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00007189
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007190 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007191 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00007192
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007193 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007194 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00007195
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007196 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007197 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007198
7199 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007200 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007201
7202 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007203 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007204
7205 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007206 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007207
7208 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007209 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007210
7211 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007212 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007213
7214 // Batch 0, Channel 9, Height (1) x Width (1)
7215 10.0f
7216 };
telsoa014fcda012018-03-09 14:13:49 +00007217 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007218 std::vector<float> expectedOutputValues
7219 {
7220 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007221 1.0f * approxInvL2Norm,
7222 2.0f * approxInvL2Norm,
7223 3.0f * approxInvL2Norm,
7224 4.0f * approxInvL2Norm,
7225 5.0f * approxInvL2Norm,
7226 6.0f * approxInvL2Norm,
7227 7.0f * approxInvL2Norm,
7228 8.0f * approxInvL2Norm,
7229 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00007230 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007231 };
telsoa014fcda012018-03-09 14:13:49 +00007232
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007233
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007234 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7235 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00007236}
7237
Ferran Balaguere52211e2019-06-17 12:23:52 +01007238LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
7239 armnn::IWorkloadFactory& workloadFactory,
7240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7241 const armnn::DataLayout layout)
7242{
7243 // Dummy descriptor to get the default value of epsilon.
7244 armnn::L2NormalizationDescriptor descriptor;
7245
7246 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7247 layout, descriptor.m_Eps);
7248}
7249
7250LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
7251 armnn::IWorkloadFactory& workloadFactory,
7252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7253 const armnn::DataLayout layout)
7254{
7255 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7256 layout, 1e-9f);
7257}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007258
7259LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007260 armnn::IWorkloadFactory& workloadFactory,
7261 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007262 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007263{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007264 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007265}
7266
7267LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
7268 armnn::IWorkloadFactory& workloadFactory,
7269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7270 const armnn::DataLayout layout)
7271{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007272 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007273 layout);
7274}
7275
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007276LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
7277 armnn::IWorkloadFactory& workloadFactory,
7278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7279 const armnn::DataLayout layout)
7280{
7281 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7282 1.f/128, 128, layout);
7283}
7284
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007285template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7286LayerTestResult<T, 4> L2Normalization2dTestCommon(
7287 armnn::IWorkloadFactory& workloadFactory,
7288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007289 float scale,
7290 int32_t offset,
7291 float outScale,
7292 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007293 const armnn::DataLayout layout)
7294{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007295 // Width: 5
7296 // Height: 1
7297 // Channels: 2
7298 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00007299 unsigned int numberOfBatches = 1;
7300 unsigned int numberOfChannels = 2;
7301 unsigned int height = 1;
7302 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00007303
Nina Drozdd41b2592018-11-19 13:03:36 +00007304 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007305 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007306 std::vector<float> inputValues
7307 {
7308 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00007309 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00007310
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007311 // Batch 0, Channel 1, Height (1) x Width (5)
7312 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
7313 };
7314 std::vector<float> expectedOutputValues
7315 {
7316 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007317 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7318 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7319 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7320 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
7321 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007322
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007323 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007324 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
7325 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
7326 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
7327 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007328 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007329 };
telsoa014fcda012018-03-09 14:13:49 +00007330
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007331 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7332 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007333}
telsoa014fcda012018-03-09 14:13:49 +00007334
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007335LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007336 armnn::IWorkloadFactory& workloadFactory,
7337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007338 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007339{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007340 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7341 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007342}
7343
7344LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
7345 armnn::IWorkloadFactory& workloadFactory,
7346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7347 const armnn::DataLayout layout)
7348{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007349 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007350 layout);
7351}
7352
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007353LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
7354 armnn::IWorkloadFactory& workloadFactory,
7355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7356 const armnn::DataLayout layout)
7357{
7358 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7359 1.f/128, 128, layout);
7360}
7361
LayerTestResult<float, 2> L2Normalization2dShapeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Verifies that the L2 normalization workload also accepts a rank-2
    // ({5, 2}) tensor, not just the usual rank-4 shapes, normalizing across
    // the last dimension (pairs of adjacent values) under NHWC.
    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });

    std::vector<float> inputData
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
    };
    // Each row's two elements are scaled by the inverse L2 norm of that row.
    std::vector<float> expectedOutputData
    {
        1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
        3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
        5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
    };

    // Float32 tensors: the quantization parameters (0.f, 0) are pass-through.
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);

    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
                                                             inputTensorInfo.GetQuantizationScale(),
                                                             inputTensorInfo.GetQuantizationOffset(),
                                                             inputData));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
                                                                    outputTensorInfo.GetQuantizationScale(),
                                                                    outputTensorInfo.GetQuantizationOffset(),
                                                                    expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload descriptor with an explicit epsilon and data layout.
    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = 1e-12f;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    // Configure after allocation, then run and read back the result.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7426
// Common implementation for the 3D L2 normalization tests: 1 batch,
// 2 channels, 4x3 spatial extent. Normalization is computed across the two
// channel values sharing each spatial position. The scale/offset pairs are
// the input and output quantization parameters forwarded to the type-specific
// test implementation.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Normalization3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float scale,
    int32_t offset,
    float outScale,
    int32_t outOffset,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    // Each expected value is the input scaled by the inverse L2 norm of the
    // cross-channel pair at the same spatial position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00007496
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007497LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007498 armnn::IWorkloadFactory& workloadFactory,
7499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007500 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00007501{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007502 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7503 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007504}
7505
7506LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
7507 armnn::IWorkloadFactory& workloadFactory,
7508 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7509 const armnn::DataLayout layout)
7510{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007511 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007512 layout);
7513}
7514
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007515LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
7516 armnn::IWorkloadFactory& workloadFactory,
7517 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7518 const armnn::DataLayout layout)
7519{
7520 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7521 1.f/128, 128, layout);
7522}
7523
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007524template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
7525LayerTestResult<T, 4> L2Normalization4dTestCommon(
7526 armnn::IWorkloadFactory& workloadFactory,
7527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007528 float scale,
7529 int32_t offset,
7530 float outScale,
7531 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007532 const armnn::DataLayout layout)
7533{
7534 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007535 // Height: 4
7536 // Channels: 3
7537 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00007538 unsigned int numberOfBatches = 2;
7539 unsigned int numberOfChannels = 3;
7540 unsigned int height = 4;
7541 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00007542
Nina Drozdd41b2592018-11-19 13:03:36 +00007543 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00007544 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007545 std::vector<float> inputValues
7546 {
7547 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007548 235.0f, 46.0f, 178.0f,
7549 100.0f, 123.0f, 19.0f,
7550 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007551 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00007552
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007553 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007554 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007555 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00007556 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007557 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00007558
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007559 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007560 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00007561 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007562 12.0f, 209.0f, 200.0f,
7563 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00007564
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007565 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007566 67.0f, 90.0f, 49.0f,
7567 7.0f, 163.0f, 18.0f,
7568 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00007569 247.0f, 59.0f, 189.0f,
7570
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007571 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007572 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007573 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00007574 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007575 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00007576
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007577 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007578 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00007579 115.0f, 116.0f, 238.0f,
7580 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007581 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007582 };
7583 std::vector<float> expectedOutputValues
7584 {
7585 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007586 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007587 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007588 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7589 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
7590 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007591 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007592 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007593 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007594 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007595 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007596 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007597 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007598
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007599 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007600 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007601 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007602 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007603 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007604 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007605 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007606 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
7607 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7608 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007609 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7610 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7611 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007612
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007613 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007614 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007615 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
7616 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7617 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007618 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007619 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007620 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007621 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7622 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007623 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7624 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7625 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007626
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007627 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007628 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7629 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7630 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7631 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007632 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007633 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7634 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007635 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
7636 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7637 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007638 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007639 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
7640
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007641 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007642 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7643 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7644 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007645 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007646 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7647 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7648 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
7649 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007650 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7651 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007652 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007653 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007654
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007655 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007656 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007657 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7658 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7659 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
7660 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7661 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7662 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007663 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007664 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007665 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007666 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007667 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007668 };
telsoa014fcda012018-03-09 14:13:49 +00007669
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007670 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7671 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007672}
7673
7674LayerTestResult<float, 4> L2Normalization4dTest(
7675 armnn::IWorkloadFactory& workloadFactory,
7676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7677 const armnn::DataLayout layout)
7678{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007679 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7680 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007681}
7682
7683LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7684 armnn::IWorkloadFactory& workloadFactory,
7685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7686 const armnn::DataLayout layout)
7687{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007688 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007689 layout);
telsoa014fcda012018-03-09 14:13:49 +00007690}
7691
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007692LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7693 armnn::IWorkloadFactory& workloadFactory,
7694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7695 const armnn::DataLayout layout)
7696{
7697 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7698 1.f/128, 128, layout);
7699}
7700
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007701template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007702LayerTestResult<T, 4> ConstantTestImpl(
7703 armnn::IWorkloadFactory& workloadFactory,
7704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007705 float qScale,
7706 int32_t qOffset)
7707{
7708 constexpr unsigned int inputWidth = 3;
7709 constexpr unsigned int inputHeight = 4;
7710 constexpr unsigned int inputChannels = 3;
7711 constexpr unsigned int inputBatchSize = 2;
7712
7713 constexpr unsigned int outputWidth = inputWidth;
7714 constexpr unsigned int outputHeight = inputHeight;
7715 constexpr unsigned int outputChannels = inputChannels;
7716 constexpr unsigned int outputBatchSize = inputBatchSize;
7717
Nina Drozd58ef2c62019-05-16 12:09:18 +01007718 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7719 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007720
Nina Drozd58ef2c62019-05-16 12:09:18 +01007721 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7722 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007723
7724 // Set quantization parameters if the requested type is a quantized type.
7725 if(armnn::IsQuantizedType<T>())
7726 {
7727 inputTensorInfo.SetQuantizationScale(qScale);
7728 inputTensorInfo.SetQuantizationOffset(qOffset);
7729 outputTensorInfo.SetQuantizationScale(qScale);
7730 outputTensorInfo.SetQuantizationOffset(qOffset);
7731 }
7732
7733 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7734 QuantizedVector<T>(qScale, qOffset, {
7735 // Batch 0, Channel 0
7736 235.0f, 46.0f, 178.0f,
7737 100.0f, 123.0f, 19.0f,
7738 172.0f, 74.0f, 250.0f,
7739 6.0f, 195.0f, 80.0f,
7740
7741 // Batch 0, Channel 1
7742 113.0f, 95.0f, 202.0f,
7743 77.0f, 114.0f, 71.0f,
7744 122.0f, 246.0f, 166.0f,
7745 82.0f, 28.0f, 37.0f,
7746
7747 // Batch 0, Channel 2
7748 56.0f, 170.0f, 162.0f,
7749 194.0f, 89.0f, 254.0f,
7750 12.0f, 209.0f, 200.0f,
7751 1.0f, 64.0f, 54.0f,
7752
7753 // Batch 1, Channel 0
7754 67.0f, 90.0f, 49.0f,
7755 7.0f, 163.0f, 18.0f,
7756 25.0f, 117.0f, 103.0f,
7757 247.0f, 59.0f, 189.0f,
7758
7759 // Batch 1, Channel 1
7760 239.0f, 104.0f, 199.0f,
7761 17.0f, 124.0f, 153.0f,
7762 222.0f, 217.0f, 75.0f,
7763 32.0f, 126.0f, 21.0f,
7764
7765 // Batch 1, Channel 2
7766 97.0f, 145.0f, 215.0f,
7767 115.0f, 116.0f, 238.0f,
7768 226.0f, 16.0f, 132.0f,
7769 92.0f, 125.0f, 88.0f,
7770 })));
7771
7772 LayerTestResult<T, 4> result(outputTensorInfo);
7773 result.outputExpected = input;
7774
7775 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7776
7777 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7778 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7779
7780 armnn::ConstantQueueDescriptor descriptor;
7781 descriptor.m_LayerOutput = &constantTensor;
7782
7783 armnn::WorkloadInfo info;
7784 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7785
7786 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7787
7788 outputHandle->Allocate();
7789
Derek Lambertif30f7d32019-04-09 10:25:02 +01007790 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007791 workload->Execute();
7792
7793 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7794 return result;
7795}
7796
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007797LayerTestResult<float, 4> ConstantTest(
7798 armnn::IWorkloadFactory& workloadFactory,
7799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007800{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007801 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007802}
7803
Nina Drozd58ef2c62019-05-16 12:09:18 +01007804LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7805 armnn::IWorkloadFactory& workloadFactory,
7806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7807{
7808 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7809}
7810
7811LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007812 armnn::IWorkloadFactory& workloadFactory,
7813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007814{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007815 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007816}
7817
Jim Flynn4ed6c832019-05-20 11:02:46 +01007818LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007819 armnn::IWorkloadFactory& workloadFactory,
7820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7821{
7822 unsigned int outputWidth = 3;
7823 unsigned int outputHeight = 6;
7824 unsigned int outputChannels = 3;
7825
7826 unsigned int inputWidth1 = 3;
7827 unsigned int inputHeight1 = 6;
7828 unsigned int inputChannels1 = 2;
7829
7830 unsigned int inputWidth2 = 3;
7831 unsigned int inputHeight2 = 6;
7832 unsigned int inputChannels2 = 1;
7833
7834 // Defines the tensor descriptors.
7835 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7836 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7837 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7838
7839 // Quantized input1 tensor. Range [-3, 1]
7840 const float inputScale1 = 0.015686f;
7841 const int32_t inputOffset1 = 192;
7842
7843 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7844 {
7845 1, 2, 3,
7846 4, 5, 6,
7847 7, 8, 9,
7848 10, 11, 12,
7849 13, 14, 15,
7850 16, 17, 18,
7851
7852 19, 20, 21,
7853 22, 23, 24,
7854 25, 26, 27,
7855 28, 29, 30,
7856 31, 32, 33,
7857 34, 35, 36,
7858 })
7859 );
7860
7861 // Quatized input2 tensor. Range [-1, 4]
7862 const float inputScale2 = 0.019608f;
7863 const int32_t inputOffset2 = 50;
7864
7865 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7866 {
7867 37, 38, 39,
7868 40, 41, 42,
7869 43, 44, 45,
7870 46, 47, 48,
7871 49, 50, 51,
7872 52, 53, 54,
7873 })
7874 );
7875
7876 // Output has the same quantization parameters than input1,
7877 // so that only the requantization of input2 is required
7878 const float outputScale = 0.015686f;
7879 const int32_t outputOffset = 192;
7880
7881 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7882
7883 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7884 {
7885 1, 2, 3,
7886 4, 5, 6,
7887 7, 8, 9,
7888 10, 11, 12,
7889 13, 14, 15,
7890 16, 17, 18,
7891
7892 19, 20, 21,
7893 22, 23, 24,
7894 25, 26, 27,
7895 28, 29, 30,
7896 31, 32, 33,
7897 34, 35, 36,
7898
7899 176, 177, 178,
7900 179, 181, 182,
7901 183, 184, 186,
7902 187, 188, 189,
7903 191, 192, 193,
7904 195, 196, 197,
7905 })
7906 );
7907
7908 outputTensorInfo.SetQuantizationScale(outputScale);
7909 outputTensorInfo.SetQuantizationOffset(outputOffset);
7910 inputTensorInfo1.SetQuantizationScale(inputScale1);
7911 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7912 inputTensorInfo2.SetQuantizationScale(inputScale2);
7913 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7914
7915 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007916 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007917
7918 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007919 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007920
7921 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7922
7923 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7924
7925 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7926 subTensorsSupported ?
7927 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7928 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7929
7930 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7931 subTensorsSupported ?
7932 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7933 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7934
Jim Flynne242f2d2019-05-22 14:24:13 +01007935 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007936 armnn::WorkloadInfo info;
7937 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7938 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7939 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7940
7941 data.m_ViewOrigins.push_back(window1);
7942 data.m_ViewOrigins.push_back(window2);
7943
Jim Flynn4ed6c832019-05-20 11:02:46 +01007944 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007945
7946 inputHandle1->Allocate();
7947 inputHandle2->Allocate();
7948 outputHandle->Allocate();
7949
7950 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7951 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7952
Derek Lambertif30f7d32019-04-09 10:25:02 +01007953 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007954 workload->Execute();
7955
7956 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7957
7958 return ret;
7959}
7960
Jim Flynn4ed6c832019-05-20 11:02:46 +01007961LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007962 armnn::IWorkloadFactory& workloadFactory,
7963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007964{
surmeh013537c2c2018-05-18 16:31:43 +01007965 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007966 unsigned int outputHeight = 6;
7967 unsigned int outputChannels = 3;
7968
surmeh013537c2c2018-05-18 16:31:43 +01007969 unsigned int inputWidth1 = 3;
7970 unsigned int inputHeight1 = 6;
7971 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007972
surmeh013537c2c2018-05-18 16:31:43 +01007973 unsigned int inputWidth2 = 3;
7974 unsigned int inputHeight2 = 6;
7975 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007976
telsoa01c577f2c2018-08-31 09:22:23 +01007977 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007978 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7979 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7980 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007981
Jim Flynn4ed6c832019-05-20 11:02:46 +01007982 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007983 const float scale = 0.13497836f;
7984 const int32_t offset = -7;
7985
7986 outputTensorInfo.SetQuantizationScale(scale);
7987 outputTensorInfo.SetQuantizationOffset(offset);
7988 inputTensorInfo1.SetQuantizationScale(scale);
7989 inputTensorInfo1.SetQuantizationOffset(offset);
7990 inputTensorInfo2.SetQuantizationScale(scale);
7991 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007992
7993 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7994
7995 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007996 {
7997 1, 2, 3,
7998 4, 5, 6,
7999 7, 8, 9,
8000 10, 11, 12,
8001 13, 14, 15,
8002 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00008003
surmeh013537c2c2018-05-18 16:31:43 +01008004 19, 20, 21,
8005 22, 23, 24,
8006 25, 26, 27,
8007 28, 29, 30,
8008 31, 32, 33,
8009 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00008010
surmeh013537c2c2018-05-18 16:31:43 +01008011 37, 38, 39,
8012 40, 41, 42,
8013 43, 44, 45,
8014 46, 47, 48,
8015 49, 50, 51,
8016 52, 53, 54,
8017 })
telsoa014fcda012018-03-09 14:13:49 +00008018 );
8019
telsoa014fcda012018-03-09 14:13:49 +00008020 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
8021 {
surmeh013537c2c2018-05-18 16:31:43 +01008022 1, 2, 3,
8023 4, 5, 6,
8024 7, 8, 9,
8025 10, 11, 12,
8026 13, 14, 15,
8027 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00008028
surmeh013537c2c2018-05-18 16:31:43 +01008029 19, 20, 21,
8030 22, 23, 24,
8031 25, 26, 27,
8032 28, 29, 30,
8033 31, 32, 33,
8034 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00008035 })
8036 );
8037
8038 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
8039 {
surmeh013537c2c2018-05-18 16:31:43 +01008040 37, 38, 39,
8041 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00008042 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01008043 46, 47, 48,
8044 49, 50, 51,
8045 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00008046 })
8047 );
8048
telsoa01c577f2c2018-08-31 09:22:23 +01008049 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01008050 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00008051
telsoa01c577f2c2018-08-31 09:22:23 +01008052 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01008053 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00008054
telsoa014fcda012018-03-09 14:13:49 +00008055
8056 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8057
8058 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8059
8060 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8061 subTensorsSupported ?
8062 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8063 workloadFactory.CreateTensorHandle(inputTensorInfo1);
8064
8065 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8066 subTensorsSupported ?
8067 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8068 workloadFactory.CreateTensorHandle(inputTensorInfo2);
8069
telsoa014fcda012018-03-09 14:13:49 +00008070
Jim Flynne242f2d2019-05-22 14:24:13 +01008071 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00008072 armnn::WorkloadInfo info;
8073 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8074 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00008075 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8076
8077 data.m_ViewOrigins.push_back(window1);
8078 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00008079
Jim Flynn4ed6c832019-05-20 11:02:46 +01008080 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00008081
8082 inputHandle1->Allocate();
8083 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00008084 outputHandle->Allocate();
8085
8086 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8087 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00008088
Derek Lambertif30f7d32019-04-09 10:25:02 +01008089 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00008090 workload->Execute();
8091
8092 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8093
8094 return ret;
8095}
8096
Jim Flynn4ed6c832019-05-20 11:02:46 +01008097LayerTestResult<uint16_t, 3> ConcatUint16Test(
Jim Flynncbb66aa2019-05-15 13:03:54 +01008098 armnn::IWorkloadFactory& workloadFactory,
8099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8100{
8101 unsigned int outputWidth = 3;
8102 unsigned int outputHeight = 6;
8103 unsigned int outputChannels = 3;
8104
8105 unsigned int inputWidth1 = 3;
8106 unsigned int inputHeight1 = 6;
8107 unsigned int inputChannels1 = 2;
8108
8109 unsigned int inputWidth2 = 3;
8110 unsigned int inputHeight2 = 6;
8111 unsigned int inputChannels2 = 1;
8112
8113 // Defines the tensor descriptors.
8114 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
8115 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
8116 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
8117
Jim Flynn4ed6c832019-05-20 11:02:46 +01008118 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
Jim Flynncbb66aa2019-05-15 13:03:54 +01008119 const float scale = 0.13497836f;
8120 const int32_t offset = -7;
8121
8122 outputTensorInfo.SetQuantizationScale(scale);
8123 outputTensorInfo.SetQuantizationOffset(offset);
8124 inputTensorInfo1.SetQuantizationScale(scale);
8125 inputTensorInfo1.SetQuantizationOffset(offset);
8126 inputTensorInfo2.SetQuantizationScale(scale);
8127 inputTensorInfo2.SetQuantizationOffset(offset);
8128
8129 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
8130
8131 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
8132 {
8133 1, 2, 3,
8134 4, 5, 6,
8135 7, 8, 9,
8136 10, 11, 12,
8137 13, 14, 15,
8138 16, 17, 18,
8139
8140 19, 20, 21,
8141 22, 23, 24,
8142 25, 26, 27,
8143 28, 29, 30,
8144 31, 32, 33,
8145 34, 35, 36,
8146
8147 37, 38, 39,
8148 40, 41, 42,
8149 43, 44, 45,
8150 46, 47, 48,
8151 49, 50, 51,
8152 52, 53, 54,
8153 }));
8154
8155 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
8156 {
8157 1, 2, 3,
8158 4, 5, 6,
8159 7, 8, 9,
8160 10, 11, 12,
8161 13, 14, 15,
8162 16, 17, 18,
8163
8164 19, 20, 21,
8165 22, 23, 24,
8166 25, 26, 27,
8167 28, 29, 30,
8168 31, 32, 33,
8169 34, 35, 36,
8170 }));
8171
8172 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
8173 {
8174 37, 38, 39,
8175 40, 41, 42,
8176 43, 44, 45,
8177 46, 47, 48,
8178 49, 50, 51,
8179 52, 53, 54,
8180 }));
8181
8182 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01008183 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008184
8185 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01008186 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008187
8188
8189 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8190
8191 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
8192
8193 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
8194 subTensorsSupported ?
8195 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
8196 workloadFactory.CreateTensorHandle(inputTensorInfo1);
8197
8198 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
8199 subTensorsSupported ?
8200 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
8201 workloadFactory.CreateTensorHandle(inputTensorInfo2);
8202
8203
Jim Flynne242f2d2019-05-22 14:24:13 +01008204 armnn::ConcatQueueDescriptor data;
Jim Flynncbb66aa2019-05-15 13:03:54 +01008205 armnn::WorkloadInfo info;
8206 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8207 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
8208 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8209
8210 data.m_ViewOrigins.push_back(window1);
8211 data.m_ViewOrigins.push_back(window2);
8212
Jim Flynn4ed6c832019-05-20 11:02:46 +01008213 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Jim Flynncbb66aa2019-05-15 13:03:54 +01008214
8215 inputHandle1->Allocate();
8216 inputHandle2->Allocate();
8217 outputHandle->Allocate();
8218
8219 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
8220 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
8221
8222 workload->PostAllocationConfigure();
8223 workload->Execute();
8224
8225 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
8226
8227 return ret;
8228}
telsoa014fcda012018-03-09 14:13:49 +00008229
namespace
{
// Runs a quantized Addition workload end-to-end on the given workload factory:
// builds two input tensors and one output tensor (each with its own
// quantization scale/offset), executes the workload, and returns the actual
// output alongside the caller-supplied expected values for later comparison.
// T selects the data type: uint8_t maps to QuantisedAsymm8; any other T
// (in practice int16_t) maps to QuantisedSymm16.
// NOTE: memoryManager is accepted for signature parity with other helpers but
// is not referenced in this body.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],       // 4D shape of the first input
    const std::vector<T>& values0,      // quantized values of the first input
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],       // 4D shape of the second input
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],     // 4D shape of the output
    const std::vector<T> & outValues,   // expected quantized output values
    float outScale,
    int32_t outOffset)
{
    // Map the C++ element type onto the corresponding armnn data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // The expected output is returned to the caller inside the result object.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Backend memory must be allocated before input data is copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure after allocation, then run the workload.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8299
8300LayerTestResult<uint8_t, 4> AdditionUint8Test(
8301 armnn::IWorkloadFactory& workloadFactory,
8302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8303{
8304 const unsigned int shape0[] = { 1, 2, 2, 3 };
8305 const unsigned int shape1[] = { 1, 2, 2, 3 };
8306
8307 std::vector<uint8_t> input0(
8308 {
8309 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
8310 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
8311 });
8312
8313 std::vector<uint8_t> input1(
8314 {
8315 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
8316 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
8317 });
8318
8319 std::vector<uint8_t> output(
8320 {
8321 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
8322 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
8323 });
8324
8325 return AdditionQuantizeTestHelper(workloadFactory,
8326 memoryManager,
8327 shape0, input0, 7.0f, 3,
8328 shape1, input1, 7.0f, 3,
8329 shape0, output, 7.0f, 3);
8330}
8331
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Element-wise addition of two QSymm16 tensors (scale 7.0, offset 0 for
    // both inputs and the output). Dequantized values are shown to the right;
    // no sum saturates, so output == input0 + input1 exactly.
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
        {
            63,  35,  77,  70,  56,  112, // 441, 245, 539, 490, 392, 784
            203, 28,  252, 168, 245, 91   // 1421, 196, 1764, 1176, 1715, 637
        });

    std::vector<int16_t> input1(
        {
            21,  7,   175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
            126, 161, 63,  21,  105, 126  // 882, 1127, 441, 147, 735, 882
        });

    std::vector<int16_t> output(
        {
            84,  42,  252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
8363
namespace
{
// Runs a quantized Multiplication workload end-to-end on the given workload
// factory: builds two input tensors and one output tensor (each with its own
// quantization scale/offset), executes the workload, and returns the actual
// output alongside the caller-supplied expected values for later comparison.
// ArmnnType selects the data type explicitly; T is derived from it.
// NOTE: memoryManager is accepted for signature parity with other helpers but
// is not referenced in this body.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],      // 4D shape of the first input
    const std::vector<T> & values0,    // quantized values of the first input
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],      // 4D shape of the second input
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],    // 4D shape of the output
    const std::vector<T> & outValues,  // expected quantized output values
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // The expected output is returned to the caller inside the result object.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Backend memory must be allocated before input data is copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure after allocation, then run the workload.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8429
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008430LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
8431 armnn::IWorkloadFactory& workloadFactory,
8432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008433{
8434 unsigned int batchSize = 1;
8435 unsigned int channels = 2;
8436 unsigned int height = 2;
8437 unsigned int width = 3;
8438 const unsigned int shape[] = { batchSize, channels, height, width };
8439
telsoa01c577f2c2018-08-31 09:22:23 +01008440 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008441 std::vector<uint8_t> input0({
8442 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
8443 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
8444 });
8445
telsoa01c577f2c2018-08-31 09:22:23 +01008446 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008447 std::vector<uint8_t> input1({
8448 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
8449 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
8450 });
8451
telsoa01c577f2c2018-08-31 09:22:23 +01008452 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01008453 std::vector<uint8_t> output(
8454 {
8455 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
8456 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
8457 });
8458
Sadik Armagan2999a022019-04-09 14:20:12 +01008459 // Scale/offset chosen to have output values out of range.
8460 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8461 memoryManager,
8462 shape,
8463 input0,
8464 4.0f,
8465 1,
8466 shape,
8467 input1,
8468 3.0f,
8469 -2,
8470 shape,
8471 output,
8472 1366.255f,
8473 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01008474}
8475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008476LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
8477 armnn::IWorkloadFactory& workloadFactory,
8478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008479{
8480 const unsigned int shape0[] = { 1, 2, 2, 3 };
8481 const unsigned int shape1[] = { 1, 1, 1, 1 };
8482
8483 std::vector<uint8_t> input0({
8484 1, 2, 3, 4, 5, 6,
8485 7, 8, 9, 10, 11, 12
8486 });
8487
8488 std::vector<uint8_t> input1({2});
8489
8490 std::vector<uint8_t> output({
8491 2, 4, 6, 8, 10, 12,
8492 14, 16, 18, 20, 22, 24
8493 });
8494
Sadik Armagan2999a022019-04-09 14:20:12 +01008495 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8496 memoryManager,
8497 shape0,
8498 input0,
8499 1.0f,
8500 0,
8501 shape1,
8502 input1,
8503 1.0f,
8504 0,
8505 shape0,
8506 output,
8507 1.0f,
8508 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008509}
8510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008511LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
8512 armnn::IWorkloadFactory& workloadFactory,
8513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008514{
8515 const unsigned int shape0[] = { 1, 2, 2, 3 };
8516 const unsigned int shape1[] = { 1, 1, 1, 3 };
8517
8518 std::vector<uint8_t> input0({
8519 1, 2, 3, 4, 5, 6,
8520 7, 8, 9, 10, 11, 12
8521 });
8522
8523 std::vector<uint8_t> input1({1, 2, 3});
8524
8525 std::vector<uint8_t> output({
8526 1, 4, 9, 4, 10, 18,
8527 7, 16, 27, 10, 22, 36
8528 });
8529
Sadik Armagan2999a022019-04-09 14:20:12 +01008530 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8531 memoryManager,
8532 shape0,
8533 input0,
8534 1.0f,
8535 0,
8536 shape1,
8537 input1,
8538 1.0f,
8539 0,
8540 shape0,
8541 output,
8542 1.0f,
8543 0);
8544}
8545
8546LayerTestResult<int16_t, 4> MultiplicationInt16Test(
8547 armnn::IWorkloadFactory& workloadFactory,
8548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8549{
8550 const unsigned int shape[] = { 1, 2, 2, 3 };
8551
8552 std::vector<int16_t> input0(
8553 {
8554 6, 7, 8, 9, 10, 11,
8555 12, 13, 14, 15, 16, 17
8556 });
8557
8558 std::vector<int16_t> input1(
8559 {
8560 1, 2, 3, 4, 5, 6,
8561 7, 8, 9, 10, 11, 12
8562 });
8563
8564 std::vector<int16_t> output(
8565 {
8566 6, 14, 24, 36, 50, 66,
8567 84, 104, 126, 150, 176, 204
8568 });
8569
8570 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8571 memoryManager,
8572 shape,
8573 input0,
8574 1.0f,
8575 0,
8576 shape,
8577 input1,
8578 1.0f,
8579 0,
8580 shape,
8581 output,
8582 1.0f,
8583 0);
8584}
8585
8586LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8587 armnn::IWorkloadFactory& workloadFactory,
8588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8589{
8590 const unsigned int shape0[] = { 1, 2, 2, 3 };
8591 const unsigned int shape1[] = { 1, 1, 1, 1 };
8592
8593 std::vector<int16_t> input0(
8594 {
8595 1, 2, 3, 4, 5, 6,
8596 7, 8, 9, 10, 11, 12
8597 });
8598
8599 std::vector<int16_t> input1({2});
8600
8601 std::vector<int16_t> output(
8602 {
8603 2, 4, 6, 8, 10, 12,
8604 14, 16, 18, 20, 22, 24
8605 });
8606
8607 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8608 memoryManager,
8609 shape0,
8610 input0,
8611 1.0f,
8612 0,
8613 shape1,
8614 input1,
8615 1.0f,
8616 0,
8617 shape0,
8618 output,
8619 1.0f,
8620 0);
8621}
8622
8623LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8624 armnn::IWorkloadFactory& workloadFactory,
8625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8626{
8627 const unsigned int shape0[] = { 1, 2, 2, 3 };
8628 const unsigned int shape1[] = { 1, 1, 1, 3 };
8629
8630 std::vector<int16_t> input0(
8631 {
8632 1, 2, 3, 4, 5, 6,
8633 7, 8, 9, 10, 11, 12
8634 });
8635
8636 std::vector<int16_t> input1({1, 2, 3});
8637
8638 std::vector<int16_t> output(
8639 {
8640 1, 4, 9, 4, 10, 18,
8641 7, 16, 27, 10, 22, 36
8642 });
8643
8644 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8645 memoryManager,
8646 shape0,
8647 input0,
8648 1.0f,
8649 0,
8650 shape1,
8651 input1,
8652 1.0f,
8653 0,
8654 shape0,
8655 output,
8656 1.0f,
8657 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008658}
telsoa014fcda012018-03-09 14:13:49 +00008659
namespace
{
// Runs a Subtraction workload (output = input0 - input1, element-wise with
// broadcasting decided by the backend) on the given workload factory. Builds
// both input tensors and the output tensor with per-tensor quantization
// scale/offset, executes the workload, and returns the actual output alongside
// the caller-supplied expected values for later comparison.
// ArmnnType selects the data type explicitly (Float32 callers pass identity
// quantization parameters); T is derived from it.
// NOTE: memoryManager is accepted for signature parity with other helpers but
// is not referenced in this body.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],      // 4D shape of the minuend
    const std::vector<T>& values0,     // quantized values of the minuend
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],      // 4D shape of the subtrahend
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],    // 4D shape of the output
    const std::vector<T> & outValues,  // expected quantized output values
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // The expected output is returned to the caller inside the result object.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Backend memory must be allocated before input data is copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure after allocation, then run the workload.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008726LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8727 armnn::IWorkloadFactory& workloadFactory,
8728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008729{
8730 const unsigned int shape0[] = { 1, 1, 2, 2 };
8731 const unsigned int shape1[] = { 1, 1, 2, 2 };
8732
8733 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8734 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8735 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8736
Sadik Armagan2999a022019-04-09 14:20:12 +01008737 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8738 memoryManager,
8739 shape0, input0, 0.5f, 2,
8740 shape1, input1, 1.0f, 0,
8741 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008742}
8743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008744LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8745 armnn::IWorkloadFactory& workloadFactory,
8746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008747{
8748 const unsigned int shape0[] = { 1, 1, 2, 2 };
8749 const unsigned int shape1[] = { 1, 1, 1, 1 };
8750
8751 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8752 std::vector<uint8_t> input1({ 2 });
8753 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8754
Sadik Armagan2999a022019-04-09 14:20:12 +01008755 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8756 memoryManager,
8757 shape0, input0, 0.5f, 2,
8758 shape1, input1, 1.0f, 0,
8759 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008760}
8761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008762LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8763 armnn::IWorkloadFactory& workloadFactory,
8764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008765{
8766 const unsigned int shape0[] = { 1, 1, 2, 2 };
8767 const unsigned int shape1[] = { 1, 1, 2, 1 };
8768
8769 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8770 std::vector<uint8_t> input1({ 2, 1 });
8771 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8772
Sadik Armagan2999a022019-04-09 14:20:12 +01008773 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8774 memoryManager,
8775 shape0, input0, 1.0f, 0,
8776 shape1, input1, 1.0f, 0,
8777 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008778}
8779
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008780LayerTestResult<float, 4> SubtractionTest(
8781 armnn::IWorkloadFactory& workloadFactory,
8782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008783{
8784 const unsigned int shape0[] = { 1, 1, 2, 2 };
8785 const unsigned int shape1[] = { 1, 1, 2, 2 };
8786
8787 std::vector<float> input0({ 1, 2, 3, 4 });
8788 std::vector<float> input1({ 1, -1, 0, 2 });
8789 std::vector<float> output({ 0, 3, 3, 2 });
8790
Sadik Armagan2999a022019-04-09 14:20:12 +01008791 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8792 memoryManager,
8793 shape0, input0, 1.0f, 0,
8794 shape1, input1, 1.0f, 0,
8795 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008796}
8797
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008798LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8799 armnn::IWorkloadFactory& workloadFactory,
8800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008801{
8802 const unsigned int shape0[] = { 1, 1, 2, 2 };
8803 const unsigned int shape1[] = { 1, 1, 1, 1 };
8804
8805 std::vector<float> input0({ 1, 2, 3, 4 });
8806 std::vector<float> input1({ 10 });
8807 std::vector<float> output({ -9, -8, -7, -6 });
8808
Sadik Armagan2999a022019-04-09 14:20:12 +01008809 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8810 memoryManager,
8811 shape0, input0, 1.0f, 0,
8812 shape1, input1, 1.0f, 0,
8813 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008814}
8815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008816LayerTestResult<float, 4> SubtractionBroadcastTest(
8817 armnn::IWorkloadFactory& workloadFactory,
8818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008819{
8820 const unsigned int shape0[] = { 1, 1, 2, 2 };
8821 const unsigned int shape1[] = { 1, 1, 1, 2 };
8822
8823 std::vector<float> input0({ 1, 2, 3, 4 });
8824 std::vector<float> input1({ 10, -5 });
8825 std::vector<float> output({ -9, 7, -7, 9 });
8826
Sadik Armagan2999a022019-04-09 14:20:12 +01008827 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8828 memoryManager,
8829 shape0, input0, 1.0f, 0,
8830 shape1, input1, 1.0f, 0,
8831 shape0, output, 1.0f, 0);
8832}
8833
8834LayerTestResult<int16_t, 4> SubtractionInt16Test(
8835 armnn::IWorkloadFactory& workloadFactory,
8836 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8837{
8838 const unsigned int shape0[] = { 1, 1, 2, 2 };
8839 const unsigned int shape1[] = { 1, 1, 2, 2 };
8840
8841 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8842 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8843 std::vector<int16_t> output({ 3, 3, 5, 5 });
8844
8845 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8846 memoryManager,
8847 shape0, input0, 0.5f, 0,
8848 shape1, input1, 1.0f, 0,
8849 shape0, output, 1.0f, 0);
8850}
8851
8852LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
8853 armnn::IWorkloadFactory& workloadFactory,
8854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8855{
8856 const unsigned int shape0[] = { 1, 1, 2, 2 };
8857 const unsigned int shape1[] = { 1, 1, 1, 1 };
8858
8859 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8860 std::vector<int16_t> input1({ 2 });
8861 std::vector<int16_t> output({ 3, 4, 5, 6 });
8862
8863 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8864 memoryManager,
8865 shape0, input0, 0.5f, 0,
8866 shape1, input1, 1.0f, 0,
8867 shape0, output, 1.0f, 0);
8868}
8869
8870LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8871 armnn::IWorkloadFactory& workloadFactory,
8872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8873{
8874 const unsigned int shape0[] = { 1, 1, 2, 2 };
8875 const unsigned int shape1[] = { 1, 1, 2, 1 };
8876
8877 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8878 std::vector<int16_t> input1({ 2, 1 });
8879 std::vector<int16_t> output({ 8, 11, 12, 15 });
8880
8881 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8882 memoryManager,
8883 shape0, input0, 1.0f, 0,
8884 shape1, input1, 1.0f, 0,
8885 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008886}
8887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008888LayerTestResult<float, 4> BatchNormTest(
8889 armnn::IWorkloadFactory& workloadFactory,
8890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008891{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008892 // BatchSize: 1
8893 // Channels: 2
8894 // Height: 3
8895 // Width: 2
8896
8897 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8898 std::vector<float> inputValues
8899 {
8900 // Batch 0, Channel 0, Height (3) x Width (2)
8901 1.f, 4.f,
8902 4.f, 2.f,
8903 1.f, 6.f,
8904
8905 // Batch 0, Channel 1, Height (3) x Width (2)
8906 1.f, 1.f,
8907 4.f, 1.f,
8908 -2.f, 4.f
8909 };
8910 std::vector<float> expectedOutputValues
8911 {
8912 // Batch 0, Channel 0, Height (3) x Width (2)
8913 1.f, 4.f,
8914 4.f, 2.f,
8915 1.f, 6.f,
8916
8917 // Batch 0, Channel 1, Height (3) x Width (2)
8918 3.f, 3.f,
8919 4.f, 3.f,
8920 2.f, 4.f
8921 };
8922
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008923 return BatchNormTestImpl<armnn::DataType::Float32>(
8924 workloadFactory, memoryManager,
8925 inputOutputShape, inputValues, expectedOutputValues,
8926 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008927}
8928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008929LayerTestResult<float, 4> BatchNormNhwcTest(
8930 armnn::IWorkloadFactory& workloadFactory,
8931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008932{
8933 // BatchSize: 1
8934 // Height: 3
8935 // Width: 2
8936 // Channels: 2
8937
8938 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8939 std::vector<float> inputValues
8940 {
8941 // Batch 0, Height 0, Width (2) x Channel (2)
8942 1.f, 1.f,
8943 4.f, 1.f,
8944
8945 // Batch 0, Height 1, Width (2) x Channel (2)
8946 4.f, 4.f,
8947 2.f, 1.f,
8948
8949 // Batch 0, Height 2, Width (2) x Channel (2)
8950 1.f, -2.f,
8951 6.f, 4.f
8952 };
8953 std::vector<float> expectedOutputValues
8954 {
8955 // Batch 0, Height 0, Width (2) x Channel (2)
8956 1.f, 3.f,
8957 4.f, 3.f,
8958
8959 // Batch 0, Height 1, Width (2) x Channel (2)
8960 4.f, 4.f,
8961 2.f, 3.f,
8962
8963 // Batch 0, Height 2, Width (2) x Channel (2)
8964 1.f, 2.f,
8965 6.f, 4.f
8966 };
8967
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008968 return BatchNormTestImpl<armnn::DataType::Float32>(
8969 workloadFactory, memoryManager,
8970 inputOutputShape, inputValues, expectedOutputValues,
8971 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008972}
8973
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008974LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8975 armnn::IWorkloadFactory& workloadFactory,
8976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008977{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008978 // BatchSize: 1
8979 // Channels: 2
8980 // Height: 3
8981 // Width: 2
8982
8983 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8984 std::vector<float> inputValues
8985 {
8986 // Batch 0, Channel 0, Height (3) x Width (2)
8987 1.f, 4.f,
8988 4.f, 2.f,
8989 1.f, 6.f,
8990
8991 // Batch 0, Channel 1, Height (3) x Width (2)
8992 1.f, 1.f,
8993 4.f, 1.f,
8994 -2.f, 4.f
8995 };
8996 std::vector<float> expectedOutputValues
8997 {
8998 // Batch 0, Channel 0, Height (3) x Width (2)
8999 1.f, 4.f,
9000 4.f, 2.f,
9001 1.f, 6.f,
9002
9003 // Batch 0, Channel 1, Height (3) x Width (2)
9004 3.f, 3.f,
9005 4.f, 3.f,
9006 2.f, 4.f
9007 };
9008
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009009 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
9010 workloadFactory, memoryManager,
9011 inputOutputShape, inputValues, expectedOutputValues,
9012 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01009013}
9014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009015LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
9016 armnn::IWorkloadFactory& workloadFactory,
9017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01009018{
9019 // BatchSize: 1
9020 // Height: 3
9021 // Width: 2
9022 // Channels: 2
9023
9024 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
9025 std::vector<float> inputValues
9026 {
9027 // Batch 0, Height 0, Width (2) x Channel (2)
9028 1.f, 1.f,
9029 4.f, 1.f,
9030
9031 // Batch 0, Height 1, Width (2) x Channel (2)
9032 4.f, 4.f,
9033 2.f, 1.f,
9034
9035 // Batch 0, Height 2, Width (2) x Channel (2)
9036 1.f, -2.f,
9037 6.f, 4.f
9038 };
9039 std::vector<float> expectedOutputValues
9040 {
9041 // Batch 0, Height 0, Width (2) x Channel (2)
9042 1.f, 3.f,
9043 4.f, 3.f,
9044
9045 // Batch 0, Height 1, Width (2) x Channel (2)
9046 4.f, 4.f,
9047 2.f, 3.f,
9048
9049 // Batch 0, Height 2, Width (2) x Channel (2)
9050 1.f, 2.f,
9051 6.f, 4.f
9052 };
9053
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009054 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
9055 (workloadFactory, memoryManager,
9056 inputOutputShape, inputValues, expectedOutputValues,
9057 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00009058}
9059
Matteo Martincighf5507132019-06-04 10:59:47 +01009060LayerTestResult<int16_t, 4> BatchNormInt16Test(
9061 armnn::IWorkloadFactory& workloadFactory,
9062 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9063{
9064 // BatchSize: 1
9065 // Channels: 2
9066 // Height: 3
9067 // Width: 2
9068
9069 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
9070 std::vector<float> inputValues
9071 {
9072 // Batch 0, Channel 0, Height (3) x Width (2)
9073 1.f, 4.f,
9074 4.f, 2.f,
9075 1.f, 6.f,
9076
9077 // Batch 0, Channel 1, Height (3) x Width (2)
9078 1.f, 1.f,
9079 4.f, 1.f,
9080 -2.f, 4.f
9081 };
9082 std::vector<float> expectedOutputValues
9083 {
9084 // Batch 0, Channel 0, Height (3) x Width (2)
9085 1.f, 4.f,
9086 4.f, 2.f,
9087 1.f, 6.f,
9088
9089 // Batch 0, Channel 1, Height (3) x Width (2)
9090 3.f, 3.f,
9091 4.f, 3.f,
9092 2.f, 4.f
9093 };
9094
9095 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
9096 workloadFactory, memoryManager,
9097 inputOutputShape, inputValues, expectedOutputValues,
9098 1.f/20.f, 50, armnn::DataLayout::NCHW);
9099}
9100
9101LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
9102 armnn::IWorkloadFactory& workloadFactory,
9103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9104{
9105 // BatchSize: 1
9106 // Height: 3
9107 // Width: 2
9108 // Channels: 2
9109
9110 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
9111 std::vector<float> inputValues
9112 {
9113 // Batch 0, Height 0, Width (2) x Channel (2)
9114 1.f, 1.f,
9115 4.f, 1.f,
9116
9117 // Batch 0, Height 1, Width (2) x Channel (2)
9118 4.f, 4.f,
9119 2.f, 1.f,
9120
9121 // Batch 0, Height 2, Width (2) x Channel (2)
9122 1.f, -2.f,
9123 6.f, 4.f
9124 };
9125 std::vector<float> expectedOutputValues
9126 {
9127 // Batch 0, Height 0, Width (2) x Channel (2)
9128 1.f, 3.f,
9129 4.f, 3.f,
9130
9131 // Batch 0, Height 1, Width (2) x Channel (2)
9132 4.f, 4.f,
9133 2.f, 3.f,
9134
9135 // Batch 0, Height 2, Width (2) x Channel (2)
9136 1.f, 2.f,
9137 6.f, 4.f
9138 };
9139
9140 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
9141 (workloadFactory, memoryManager,
9142 inputOutputShape, inputValues, expectedOutputValues,
9143 1.f/20.f, 50, armnn::DataLayout::NHWC);
9144}
9145
Nina Drozd58ef2c62019-05-16 12:09:18 +01009146LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009147 armnn::IWorkloadFactory& workloadFactory,
9148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009149{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009150 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00009151}
9152
Nina Drozd58ef2c62019-05-16 12:09:18 +01009153LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
9154 armnn::IWorkloadFactory& workloadFactory,
9155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9156{
9157 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
9158}
9159
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009160LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
9161 armnn::IWorkloadFactory& workloadFactory,
9162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009163{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009164 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009165}
9166
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009167LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
9168 armnn::IWorkloadFactory& workloadFactory,
9169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009170{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009171 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009172}
9173
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009174LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
9175 armnn::IWorkloadFactory& workloadFactory,
9176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009177{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009178 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009179}
9180
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009181LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
9182 armnn::IWorkloadFactory& workloadFactory,
9183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009184{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009185 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9186 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009187}
9188
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009189LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
9190 armnn::IWorkloadFactory& workloadFactory,
9191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009193 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9194 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009195}
9196
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009197LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
9198 armnn::IWorkloadFactory& workloadFactory,
9199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009200{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009201 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009202}
9203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009204LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
9205 armnn::IWorkloadFactory& workloadFactory,
9206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009207{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009208 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009209}
9210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009211LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
9212 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9214 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00009215{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009216 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9217 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009218}
9219
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009220LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
9221 armnn::IWorkloadFactory& workloadFactory,
9222 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009223{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009224 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009225}
9226
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009227LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
9228 armnn::IWorkloadFactory& workloadFactory,
9229 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009230{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009231 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9232 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00009233}
9234
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009235LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
9236 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00009237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9238 bool useSubtensor)
9239{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009240 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
9241 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009242}
9243
9244LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
9245 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009246 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009247{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009248 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009249}
9250
9251LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
9252 armnn::IWorkloadFactory& workloadFactory,
9253 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9254{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009255 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009256}
9257
9258LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
9259 armnn::IWorkloadFactory& workloadFactory,
9260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9261{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009262 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009263}
9264
9265LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
9266 armnn::IWorkloadFactory& workloadFactory,
9267 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
9268{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009269 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9270 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00009271}
9272
9273LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
9274 armnn::IWorkloadFactory& workloadFactory,
9275 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9276{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009277 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
9278 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009279}
9280
9281LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
9282 armnn::IWorkloadFactory& workloadFactory,
9283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9284{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009285 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
9286 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009287}
9288
9289LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
9290 armnn::IWorkloadFactory& workloadFactory,
9291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9292{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009293 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
9294 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00009295}
9296
9297LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
9298 armnn::IWorkloadFactory& workloadFactory,
9299 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9300 bool useSubtensor)
9301{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009302 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
9303 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00009304}
9305
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009306LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
9307 armnn::IWorkloadFactory& workloadFactory,
9308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9309 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009310{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009311 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
9312 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009313}
9314
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009315LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
9316 armnn::IWorkloadFactory& workloadFactory,
9317 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9318 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009319{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009320 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009321 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00009322}
9323
Teresa Charlin0434df62019-06-06 13:40:35 +01009324LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
9325 armnn::IWorkloadFactory& workloadFactory,
9326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9327 bool forceNoPadding)
9328{
9329 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
9330 workloadFactory, memoryManager, forceNoPadding);
9331}
9332
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009333LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
9334 armnn::IWorkloadFactory& workloadFactory,
9335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9336 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009337{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009338 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
9339 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00009340}
9341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009342LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
9343 armnn::IWorkloadFactory& workloadFactory,
9344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9345 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00009346{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009347 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009348 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009349}
9350
Teresa Charlin0434df62019-06-06 13:40:35 +01009351LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
9352 armnn::IWorkloadFactory& workloadFactory,
9353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9354 bool forceNoPadding)
9355{
9356 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
9357 workloadFactory, memoryManager, forceNoPadding);
9358}
9359
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009360LayerTestResult<float, 4> SimpleMaxPooling2dTest(
9361 armnn::IWorkloadFactory& workloadFactory,
9362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009363 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009364{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009365 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009366}
9367
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009368LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
9369 armnn::IWorkloadFactory& workloadFactory,
9370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009371 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01009372{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009373 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01009374}
9375
Teresa Charlin0434df62019-06-06 13:40:35 +01009376LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
9377 armnn::IWorkloadFactory& workloadFactory,
9378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9379 const armnn::DataLayout dataLayout)
9380{
9381 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9382}
9383LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
9384 armnn::IWorkloadFactory& workloadFactory,
9385 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9386{
9387 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9388}
9389
9390LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
9391 armnn::IWorkloadFactory& workloadFactory,
9392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9393{
9394 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9395 workloadFactory, memoryManager, 1.0f, -5);
9396}
9397
9398LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
9399 armnn::IWorkloadFactory& workloadFactory,
9400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9401{
9402 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9403 workloadFactory, memoryManager);
9404}
9405
9406LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
9407 armnn::IWorkloadFactory& workloadFactory,
9408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9409{
9410 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9411}
9412
9413LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
9414 armnn::IWorkloadFactory& workloadFactory,
9415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9416{
9417 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9418 workloadFactory, memoryManager, 1.0f, -5);
9419}
9420
9421LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
9422 armnn::IWorkloadFactory& workloadFactory,
9423 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9424{
9425 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9426 workloadFactory, memoryManager);
9427}
9428
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009429LayerTestResult<float, 4> SimpleAveragePooling2dTest(
9430 armnn::IWorkloadFactory& workloadFactory,
9431 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009432 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009433{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009434 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01009435}
9436
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009437LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
9438 armnn::IWorkloadFactory& workloadFactory,
9439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009440 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01009441{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009442 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009443 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009444}
9445
Teresa Charlin0434df62019-06-06 13:40:35 +01009446LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
9447 armnn::IWorkloadFactory& workloadFactory,
9448 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9449 const armnn::DataLayout dataLayout)
9450{
9451 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9452 workloadFactory, memoryManager, dataLayout);
9453}
9454
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009455LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
9456 armnn::IWorkloadFactory& workloadFactory,
9457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9458 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01009459{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009460 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009461 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01009462}
9463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009464LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
9465 armnn::IWorkloadFactory& workloadFactory,
9466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009467{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009468 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009469}
9470
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009471LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
9472 armnn::IWorkloadFactory& workloadFactory,
9473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009474{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009475 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9476 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00009477}
9478
Teresa Charlin0434df62019-06-06 13:40:35 +01009479LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
9480 armnn::IWorkloadFactory& workloadFactory,
9481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9482{
9483 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9484 workloadFactory, memoryManager);
9485}
9486LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
9487 armnn::IWorkloadFactory& workloadFactory,
9488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9489{
9490 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9491}
9492
9493LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
9494 armnn::IWorkloadFactory& workloadFactory,
9495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9496{
9497 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
9498 workloadFactory, memoryManager);
9499}
9500
9501LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
9502 armnn::IWorkloadFactory& workloadFactory,
9503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9504{
9505 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9506 workloadFactory, memoryManager);
9507}
9508
9509LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
9510 armnn::IWorkloadFactory& workloadFactory,
9511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9512{
9513 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
9514 workloadFactory, memoryManager);
9515}
9516
9517LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
9518 armnn::IWorkloadFactory& workloadFactory,
9519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9520{
9521 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
9522 workloadFactory, memoryManager);
9523}
9524
9525LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
9526 armnn::IWorkloadFactory& workloadFactory,
9527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9528{
9529 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
9530 workloadFactory, memoryManager);
9531}
9532
9533LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
9534 armnn::IWorkloadFactory& workloadFactory,
9535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9536{
9537 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9538}
9539
9540LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
9541 armnn::IWorkloadFactory& workloadFactory,
9542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9543{
9544 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
9545 workloadFactory, memoryManager);
9546}
9547
9548LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
9549 armnn::IWorkloadFactory& workloadFactory,
9550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9551{
9552 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9553 workloadFactory, memoryManager);
9554}
9555
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009556LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9557 armnn::IWorkloadFactory& workloadFactory,
9558 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009559 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009560{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009561 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009562}
9563
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009564LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9565 armnn::IWorkloadFactory& workloadFactory,
9566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009567 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009569 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009570}
9571
Teresa Charlin0434df62019-06-06 13:40:35 +01009572LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9573 armnn::IWorkloadFactory& workloadFactory,
9574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9575 const armnn::DataLayout dataLayout)
9576{
9577 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9578}
9579
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009580LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9581 armnn::IWorkloadFactory& workloadFactory,
9582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009583{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009584 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009585}
9586
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009587LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9588 armnn::IWorkloadFactory& workloadFactory,
9589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009590{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009591 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009592}
9593
Teresa Charlin0434df62019-06-06 13:40:35 +01009594LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9595 armnn::IWorkloadFactory& workloadFactory,
9596 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9597{
9598 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9599}
9600
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009601LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9602 armnn::IWorkloadFactory& workloadFactory,
9603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009604{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009605 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009606}
9607
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009608LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9609 armnn::IWorkloadFactory& workloadFactory,
9610 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009611{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009612 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009613}
9614
Teresa Charlin0434df62019-06-06 13:40:35 +01009615LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9616 armnn::IWorkloadFactory& workloadFactory,
9617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9618{
9619 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9620}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009621LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9622 armnn::IWorkloadFactory& workloadFactory,
9623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009624{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009625 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009626}
9627
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009628LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9629 armnn::IWorkloadFactory& workloadFactory,
9630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009631{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009632 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009633}
9634
Teresa Charlin0434df62019-06-06 13:40:35 +01009635LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9636 armnn::IWorkloadFactory& workloadFactory,
9637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9638{
9639 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9640}
9641
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009642LayerTestResult<float, 4> L2Pooling2dSize7Test(
9643 armnn::IWorkloadFactory& workloadFactory,
9644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009645{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009646 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009647}
9648
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009649LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9650 armnn::IWorkloadFactory& workloadFactory,
9651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009652{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009653 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009654}
9655
Teresa Charlin0434df62019-06-06 13:40:35 +01009656LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9657 armnn::IWorkloadFactory& workloadFactory,
9658 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9659{
9660 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9661}
9662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009663LayerTestResult<float, 4> L2Pooling2dSize9Test(
9664 armnn::IWorkloadFactory& workloadFactory,
9665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009666{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009667 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009668}
9669
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009670LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9671 armnn::IWorkloadFactory& workloadFactory,
9672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009673{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009674 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009675}
9676
Teresa Charlin0434df62019-06-06 13:40:35 +01009677LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9678 armnn::IWorkloadFactory& workloadFactory,
9679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9680{
9681 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9682}
9683LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9684 armnn::IWorkloadFactory& workloadFactory,
9685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9686{
9687 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9688}
9689
9690LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9691 armnn::IWorkloadFactory& workloadFactory,
9692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9693{
9694 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9695}
9696
9697LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9698 armnn::IWorkloadFactory& workloadFactory,
9699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9700{
9701 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9702}
9703
9704LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9705 armnn::IWorkloadFactory& workloadFactory,
9706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9707{
9708 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9709}
9710
9711LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9712 armnn::IWorkloadFactory& workloadFactory,
9713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9714{
9715 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9716}
9717
9718LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9719 armnn::IWorkloadFactory& workloadFactory,
9720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9721{
9722 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9723}
9724
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009725LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9726 armnn::IWorkloadFactory& workloadFactory,
9727 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009728{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009729 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009730}
9731
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009732LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9733 armnn::IWorkloadFactory& workloadFactory,
9734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009735{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009736 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009737}
9738
Teresa Charlin0434df62019-06-06 13:40:35 +01009739LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9740 armnn::IWorkloadFactory& workloadFactory,
9741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9742{
9743 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9744}
9745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009746LayerTestResult<float, 4> ComparePooling2dTest(
9747 armnn::IWorkloadFactory& workloadFactory,
9748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9749 armnn::IWorkloadFactory& refWorkloadFactory,
9750 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009751{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009752 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009753 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009754}
9755
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009756LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9757 armnn::IWorkloadFactory& workloadFactory,
9758 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9759 armnn::IWorkloadFactory& refWorkloadFactory,
9760 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009761{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009762 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009763 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009764}
9765
Teresa Charlin0434df62019-06-06 13:40:35 +01009766LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9767 armnn::IWorkloadFactory& workloadFactory,
9768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9769 armnn::IWorkloadFactory& refWorkloadFactory,
9770 armnn::PoolingAlgorithm poolingType)
9771{
9772 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9773 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9774}
9775
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009776LayerTestResult<float, 2> FullyConnectedLargeTest(
9777 armnn::IWorkloadFactory& workloadFactory,
9778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9779 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009780{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009781 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009782}
9783
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009784LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
9785 armnn::IWorkloadFactory& workloadFactory,
9786 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009787{
9788 // Create Initial Tensor
9789 // 1, 2, 3
9790 // 4, 5, 6
9791 // 7, 8, 9
9792
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009793 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
9794 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009795
9796 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
9797 {1, 2, 3,
9798 4, 5, 6,
9799 7, 8, 9
9800 });
9801
9802 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
9803 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
9804 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
9805 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
9806
9807 // Apply MaxPool poolSize = 1x1, stride=2x2
9808 // Result =
9809 // 1, 3
9810 // 7, 9
9811 armnn::Pooling2dDescriptor descriptor;
9812 descriptor.m_PoolHeight = 1;
9813 descriptor.m_PoolWidth = 1;
9814 descriptor.m_StrideX = 2;
9815 descriptor.m_StrideY = 2;
9816 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
9817
9818 armnn::Pooling2dQueueDescriptor queueDescriptor;
9819 queueDescriptor.m_Parameters = descriptor;
9820 armnn::WorkloadInfo workloadInfo;
9821 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
9822 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
9823
9824 // Create the MaxPool
9825 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
9826
9827 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
9828 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
9829 boost::multi_array<float, 4> resultMaxPool;
9830 resultMaxPool.resize(shape);
9831
9832
9833 // Create addition with another tensor the same size
9834 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
9835 // with the initial tensor.
9836 // 12, 16
9837 // 24, 28
9838
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009839 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
9840 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009841
9842 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
9843 {12, 16,
9844 24, 28,
9845 });
9846
9847 // Expected output tensor after MaxPool and Addition.
9848 LayerTestResult<float,4> addRet(addOutputTensorInfo);
9849 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
9850 {
9851 13, 19,
9852 31, 37
9853 }));
9854
9855 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
9856 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
9857
9858 armnn::AdditionQueueDescriptor data;
9859 armnn::WorkloadInfo info;
9860
9861 // Add the output of the MaxPool and the new tensor
9862 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
9863 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
9864 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
9865
9866 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
9867
9868 poolingInputHandle->Allocate();
9869 poolingOutputHandle->Allocate();
9870 addInputHandle->Allocate();
9871 addOutputHandle->Allocate();
9872
9873 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
9874 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
9875
9876 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
9877 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
9878
Derek Lambertif30f7d32019-04-09 10:25:02 +01009879 workload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009880 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01009881 addWorkload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009882 addWorkload->Execute();
9883
9884 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
9885
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009886 return addRet;
9887}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009889LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9890 armnn::IWorkloadFactory& workloadFactory,
9891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009892{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009893 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009894}
9895
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009896LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9897 armnn::IWorkloadFactory& workloadFactory,
9898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009899{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009900 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009901}
9902
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009903LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9904 armnn::IWorkloadFactory& workloadFactory,
9905 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009906{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009907 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009908}
9909
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009910LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9911 armnn::IWorkloadFactory& workloadFactory,
9912 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009913{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009914 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009915}
9916
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009917LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9918 armnn::IWorkloadFactory& workloadFactory,
9919 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009920{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009921 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009922}
9923
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009924LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9925 armnn::IWorkloadFactory& workloadFactory,
9926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009927{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009928 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009929}
9930
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009931LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9932 armnn::IWorkloadFactory& workloadFactory,
9933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009934{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009935 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009936}
9937
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009938LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9939 armnn::IWorkloadFactory& workloadFactory,
9940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009941{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009942 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009943}
9944
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009945LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9946 armnn::IWorkloadFactory& workloadFactory,
9947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009948{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009949 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009950}
9951
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009952LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9953 armnn::IWorkloadFactory& workloadFactory,
9954 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009955{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009956 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009957}
9958
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009959LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9960 armnn::IWorkloadFactory& workloadFactory,
9961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009962{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009963 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009964}
9965
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009966LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9967 armnn::IWorkloadFactory& workloadFactory,
9968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009969{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009970 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009971}
9972
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009973LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9974 armnn::IWorkloadFactory& workloadFactory,
9975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009976{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009977 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009978}
9979
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009980LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9981 armnn::IWorkloadFactory& workloadFactory,
9982 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009984 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009985}
9986
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009987LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9988 armnn::IWorkloadFactory& workloadFactory,
9989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009990{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009991 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009992}
9993
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009994LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9995 armnn::IWorkloadFactory& workloadFactory,
9996 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009997{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009998 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009999}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +000010000
nikraj01120522a2019-05-31 11:33:07 +010010001LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
10002 armnn::IWorkloadFactory& workloadFactory,
10003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10004{
10005 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10006}
10007
10008LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
10009 armnn::IWorkloadFactory& workloadFactory,
10010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10011{
10012 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10013}
10014
10015LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
10016 armnn::IWorkloadFactory& workloadFactory,
10017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10018{
10019 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10020}
10021
10022LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
10023 armnn::IWorkloadFactory& workloadFactory,
10024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10025{
10026 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10027}
10028
10029LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
10030 armnn::IWorkloadFactory& workloadFactory,
10031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10032{
10033 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10034}
10035
10036LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
10037 armnn::IWorkloadFactory& workloadFactory,
10038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10039{
10040 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10041}
10042
10043LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
10044 armnn::IWorkloadFactory& workloadFactory,
10045 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10046{
10047 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10048}
10049
10050LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
10051 armnn::IWorkloadFactory& workloadFactory,
10052 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10053{
10054 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10055}
10056
Keith Davisa57eccb2019-06-14 17:33:22 +010010057LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
10058 armnn::IWorkloadFactory& workloadFactory,
10059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10060{
James Conroyd2aa85e2019-07-01 17:12:40 +010010061 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +010010062 workloadFactory,
10063 memoryManager);
10064}
10065
10066LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
10067 armnn::IWorkloadFactory& workloadFactory,
10068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10069{
James Conroyd2aa85e2019-07-01 17:12:40 +010010070 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +010010071 workloadFactory,
10072 memoryManager,
10073 armnn::DataLayout::NCHW);
10074}
10075
James Conroyd2aa85e2019-07-01 17:12:40 +010010076LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +010010077 armnn::IWorkloadFactory& workloadFactory,
10078 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10079{
James Conroyd2aa85e2019-07-01 17:12:40 +010010080 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
Keith Davisa57eccb2019-06-14 17:33:22 +010010081 workloadFactory,
10082 memoryManager);
10083}
10084
James Conroyd2aa85e2019-07-01 17:12:40 +010010085LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +010010086 armnn::IWorkloadFactory& workloadFactory,
10087 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10088{
James Conroyd2aa85e2019-07-01 17:12:40 +010010089 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
10090 workloadFactory,
10091 memoryManager,
10092 armnn::DataLayout::NCHW);
10093}
10094
10095LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
10096 armnn::IWorkloadFactory& workloadFactory,
10097 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10098{
10099 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
10100 workloadFactory,
10101 memoryManager);
10102}
10103
10104LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
10105 armnn::IWorkloadFactory& workloadFactory,
10106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10107{
10108 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
10109 workloadFactory,
10110 memoryManager,
10111 armnn::DataLayout::NCHW);
10112}
10113
10114LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
10115 armnn::IWorkloadFactory& workloadFactory,
10116 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10117{
10118 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
10119 workloadFactory,
10120 memoryManager);
10121}
10122
10123LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
10124 armnn::IWorkloadFactory& workloadFactory,
10125 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10126{
10127 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +010010128 workloadFactory,
10129 memoryManager,
10130 armnn::DataLayout::NCHW);
10131}
10132
namespace {

// NOTE(review): empty anonymous namespace — presumably left over after local
// helpers were removed or relocated; candidate for deletion.
} // anonymous namespace
10136
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010137LayerTestResult<float, 4> StridedSlice4DFloat32Test(
10138 armnn::IWorkloadFactory& workloadFactory,
10139 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10140{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010141 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010142}
10143
10144LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
10145 armnn::IWorkloadFactory& workloadFactory,
10146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10147{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010148 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010149}
10150
10151LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
10152 armnn::IWorkloadFactory& workloadFactory,
10153 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10154{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010155 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010156}
10157
10158LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
10159 armnn::IWorkloadFactory& workloadFactory,
10160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10161{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010162 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010163}
10164
10165LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
10166 armnn::IWorkloadFactory& workloadFactory,
10167 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10168{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010169 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010170}
10171
10172LayerTestResult<float, 3> StridedSlice3DFloat32Test(
10173 armnn::IWorkloadFactory& workloadFactory,
10174 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10175{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010176 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010177}
10178
10179LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
10180 armnn::IWorkloadFactory& workloadFactory,
10181 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10182{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010183 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010184}
10185
10186LayerTestResult<float, 2> StridedSlice2DFloat32Test(
10187 armnn::IWorkloadFactory& workloadFactory,
10188 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10189{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010190 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010191}
10192
10193LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
10194 armnn::IWorkloadFactory& workloadFactory,
10195 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10196{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010197 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010198}
10199
10200LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
10201 armnn::IWorkloadFactory& workloadFactory,
10202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10203{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010204 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010205}
10206
10207LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
10208 armnn::IWorkloadFactory& workloadFactory,
10209 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10210{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010211 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010212}
10213
10214LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
10215 armnn::IWorkloadFactory& workloadFactory,
10216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10217{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010218 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010219}
10220
10221LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
10222 armnn::IWorkloadFactory& workloadFactory,
10223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10224{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010225 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010226}
10227
10228LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
10229 armnn::IWorkloadFactory& workloadFactory,
10230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10231{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010232 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010233}
10234
10235LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
10236 armnn::IWorkloadFactory& workloadFactory,
10237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10238{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010239 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010240}
10241
10242LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
10243 armnn::IWorkloadFactory& workloadFactory,
10244 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10245{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010246 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010247}
10248
10249LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
10250 armnn::IWorkloadFactory& workloadFactory,
10251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10252{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010253 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010254}
10255
10256LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
10257 armnn::IWorkloadFactory& workloadFactory,
10258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10259{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010260 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000010261}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010262
Matteo Martincigh42666a12019-05-29 08:53:41 +010010263LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
10264 armnn::IWorkloadFactory& workloadFactory,
10265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10266{
10267 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10268}
10269
10270LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
10271 armnn::IWorkloadFactory& workloadFactory,
10272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10273{
10274 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10275}
10276
10277LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
10278 armnn::IWorkloadFactory& workloadFactory,
10279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10280{
10281 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10282}
10283
10284LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
10285 armnn::IWorkloadFactory& workloadFactory,
10286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10287{
10288 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10289}
10290
10291LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
10292 armnn::IWorkloadFactory& workloadFactory,
10293 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10294{
10295 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10296}
10297
10298LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
10299 armnn::IWorkloadFactory& workloadFactory,
10300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10301{
10302 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10303}
10304
10305LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
10306 armnn::IWorkloadFactory& workloadFactory,
10307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10308{
10309 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10310}
10311
10312LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
10313 armnn::IWorkloadFactory& workloadFactory,
10314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10315{
10316 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10317}
10318
10319LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
10320 armnn::IWorkloadFactory& workloadFactory,
10321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10322{
10323 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10324}
10325
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010326LayerTestResult<float, 4> Debug4DFloat32Test(
10327 armnn::IWorkloadFactory& workloadFactory,
10328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10329{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010330 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010331}
10332
10333LayerTestResult<float, 3> Debug3DFloat32Test(
10334 armnn::IWorkloadFactory& workloadFactory,
10335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10336{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010337 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010338}
10339
10340LayerTestResult<float, 2> Debug2DFloat32Test(
10341 armnn::IWorkloadFactory& workloadFactory,
10342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10343{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010344 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010345}
10346
10347LayerTestResult<float, 1> Debug1DFloat32Test(
10348 armnn::IWorkloadFactory& workloadFactory,
10349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10350{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010351 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010352}
10353
10354LayerTestResult<uint8_t, 4> Debug4DUint8Test(
10355 armnn::IWorkloadFactory& workloadFactory,
10356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10357{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010358 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010359}
10360
10361LayerTestResult<uint8_t, 3> Debug3DUint8Test(
10362 armnn::IWorkloadFactory& workloadFactory,
10363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10364{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010365 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010366}
10367
10368LayerTestResult<uint8_t, 2> Debug2DUint8Test(
10369 armnn::IWorkloadFactory& workloadFactory,
10370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10371{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010372 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010373}
10374
10375LayerTestResult<uint8_t, 1> Debug1DUint8Test(
10376 armnn::IWorkloadFactory& workloadFactory,
10377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10378{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000010379 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000010380}
Matteo Martincigh49124022019-01-11 13:25:59 +000010381
narpra014951d842019-01-18 16:53:53 +000010382LayerTestResult<float, 1> Gather1DParamsFloatTest(
10383 armnn::IWorkloadFactory& workloadFactory,
10384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10385{
10386 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10387}
10388
10389LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
10390 armnn::IWorkloadFactory& workloadFactory,
10391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10392{
10393 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10394}
10395
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010396LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
10397 armnn::IWorkloadFactory& workloadFactory,
10398 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10399{
10400 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10401}
10402
narpra014951d842019-01-18 16:53:53 +000010403LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
10404 armnn::IWorkloadFactory& workloadFactory,
10405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10406{
10407 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10408}
10409
10410LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
10411 armnn::IWorkloadFactory& workloadFactory,
10412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10413{
10414 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10415}
10416
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010417LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
10418 armnn::IWorkloadFactory& workloadFactory,
10419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10420{
10421 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10422}
10423
narpra014951d842019-01-18 16:53:53 +000010424LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
10425 armnn::IWorkloadFactory& workloadFactory,
10426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10427{
10428 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
10429}
10430
10431LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
10432 armnn::IWorkloadFactory& workloadFactory,
10433 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10434{
10435 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
10436 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +000010437}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010438
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +010010439LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
10440 armnn::IWorkloadFactory& workloadFactory,
10441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10442{
10443 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
10444 workloadFactory, memoryManager);
10445}
10446
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010447LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000010448 armnn::IWorkloadFactory& workloadFactory,
10449 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10450{
10451 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10452}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010453
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +010010454LayerTestResult<float, 4> DequantizeOffsetUint8Test(
10455 armnn::IWorkloadFactory& workloadFactory,
10456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10457{
10458 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10459}
10460
10461LayerTestResult<float, 4> DequantizeSimpleInt16Test(
10462 armnn::IWorkloadFactory& workloadFactory,
10463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10464{
10465 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10466}
10467
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010010468LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
10469 armnn::IWorkloadFactory& workloadFactory,
10470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10471{
10472 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10473}
10474
10475LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
10476 armnn::IWorkloadFactory& workloadFactory,
10477 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10478{
10479 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
10480}
10481
10482LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
10483 armnn::IWorkloadFactory& workloadFactory,
10484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10485{
10486 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
10487}