blob: d6e0e879fd0d46b662e326b59dd0a7963a0641b4 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Channel 0: top row of 0.5s is interrupted by a row of zeros.
// Channel 1: a single vertical line of 1s in column 2.
// Channel 2: all -1s.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests (one value per output channel).
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Convolves the shared single-batch 3-channel 16x8 input with a 2-element batch
// of 3-channel 3x5 kernels (plus the optional shared 2-element bias) and checks
// the result against precomputed reference values.
// qScale/qOffset quantise the test data for quantised ArmnnType instantiations.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Kernel 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0 (kernel 0 response).
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            // Output channel 1 (kernel 1 response to the vertical line).
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale follows the conv convention: inputScale * weightScale.
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Convolves the shared single-batch 3-channel 16x8 input with a 2-element batch
// of 3-channel 3x3 kernels (plus the optional shared 2-element bias) and checks
// the result against precomputed reference values.
// qScale/qOffset quantise the test data for quantised ArmnnType instantiations.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Kernel 0, channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            // Kernel 1, channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0 (kernel 0 response).
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            // Output channel 1 (kernel 1 response to the vertical line).
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale follows the conv convention: inputScale * weightScale.
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
// Convolves a single-batch single-channel 4x3 NHWC input with one 3x3 kernel,
// no bias, and checks the result against precomputed reference values.
// NOTE(review): qScale/qOffset are forwarded to the impl but the input/kernel
// data below is built unquantised; biasEnabled is accepted but unused here —
// confirm whether a biased variant was intended.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Use common single-batch 4x3 image (NHWC shape {1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    });


    // Use a 1-element batch of single-channel 3x3 kernels (NHWC shape {1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is 1 batch of a 4x3 image (same spatial size as the input).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias.
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
308
// Convolves a single-batch single-channel 5x5 NHWC input with one 3x3 kernel,
// symmetric padding of 1 and stride 2x2 (output 3x3), no bias, and checks the
// result against precomputed reference values.
// NOTE(review): qScale/qOffset are forwarded to the impl but the input/kernel
// data below is built unquantised; biasEnabled is accepted but unused here.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch, 1 channel, 3x3 image:
    // (5 - 3 + 2*1) / 2 + 1 = 3 in each spatial dimension.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric padding of one pixel on every edge, stride 2 in both dimensions.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias.
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
// Convolves a single-batch 1-channel 3x3 input with a 2x2 kernel using padding
// larger than half the kernel size (left=1, top=2, right=3, bottom=4), no bias,
// and checks against the manually derived expected values below.
// qScale/qOffset quantise the test data for quantised ArmnnType instantiations.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
//[.....  .....  ..... ..... ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale), // No bias for this test.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1,  // Padding left.
        2,  // Padding top.
        3,  // Padding right.
        4); // Padding bottom.
}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
Teresa Charlinedeeb162019-06-14 11:09:19 +0100586LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
587 armnn::IWorkloadFactory& workloadFactory,
588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
589 armnn::DataLayout layout)
590{
591 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
592 workloadFactory, memoryManager, layout, 0.0f, 0);
593}
594
595LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
596 armnn::IWorkloadFactory& workloadFactory,
597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
598 armnn::DataLayout layout)
599{
600 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
601 <armnn::DataType::Float32, armnn::DataType::Float32>(
602 workloadFactory, memoryManager, layout, 0.0f, 0);
603}
604
605LayerTestResult<float, 4> Convolution1dTest(
606 armnn::IWorkloadFactory& workloadFactory,
607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
608 bool biasEnabled)
609{
610 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
611 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
612}
613
614LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
615 armnn::IWorkloadFactory& workloadFactory,
616 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
617 bool biasEnabled)
618{
619 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
620 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
621}
622
623LayerTestResult<float,4> CompareConvolution2dTest(
624 armnn::IWorkloadFactory& workloadFactory,
625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
626 armnn::IWorkloadFactory& refWorkloadFactory)
627{
628 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
629 workloadFactory, memoryManager, refWorkloadFactory);
630}
631
// Shared driver for the dilated-convolution tests: quantises the caller's
// unquantised input/kernel/expected-output data with a per-ArmnnType scale and
// offset, then runs SimpleConvolution2dTestImpl with the given dilation,
// padding and stride parameters.
// The TensorInfo references are mutated: their quantisation parameters are set here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    // Pick quantisation parameters appropriate for the data type under test.
    // ArmnnType is a compile-time template parameter, so this switch resolves
    // to a single branch per instantiation.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantise the caller-supplied float data into the target type T.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale follows the conv convention: inputScale * weightScale.
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
718
719template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
720LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
721 armnn::IWorkloadFactory& workloadFactory,
722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
723 bool biasEnabled,
724 const armnn::DataLayout layout)
725{
726 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
727 std::vector<float> inputNoQuantizedValues =
728 {
729 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
730 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
731 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
732 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
733 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
734 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
735 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
736 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
737 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
738 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
739 };
740
741 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
742 std::vector<float> kernelNoQuantizedValues =
743 {
744 1, 2, 3,
745 4, 5, 6,
746 7, 8, 9
747 };
748
749 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
750 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
751 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
752 std::vector<float> outputExpectedNoQuantizedValues =
753 {
754 6., 5., 5., 5.,
755 6., 5., 5., 5.,
756 6., 5., 5., 5.,
757 3., 2., 2., 2.
758 };
759
760 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
761 workloadFactory,
762 memoryManager,
763 inputNoQuantizedValues,
764 inputTensorInfo,
765 kernelNoQuantizedValues,
766 kernelTensorInfo,
767 outputExpectedNoQuantizedValues,
768 outputTensorInfo,
769 3,
770 3,
771 layout,
772 biasEnabled);
773}
774
775template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
776LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
777 armnn::IWorkloadFactory& workloadFactory,
778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
779 bool biasEnabled,
780 const armnn::DataLayout layout)
781{
782 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
783 std::vector<float> inputNoQuantizedValues =
784 {
785 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
786 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
787 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
788 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
789 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
790 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
791 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
792 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
793 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
794 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
795
796 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
797 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
798 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
799 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
800 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
801 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
802 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
803 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
804 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
805 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
806 };
807
808 armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
809 std::vector<float> kernelNoQuantizedValues =
810 {
811 1, 2, 3,
812 4, 5, 6,
813 7, 8, 9,
814
815 1, 2, 3,
816 4, 5, 6,
817 7, 8, 9
818 };
819
820 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
821 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
822 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
823 std::vector<float> outputExpectedNoQuantizedValues =
824 {
825 12., 10., 10., 10.,
826 12., 10., 10., 10.,
827 12., 10., 10., 10.,
Teresa Charlin20b1f882019-06-19 09:34:37 +0100828 6., 4., 4., 4.
Teresa Charlinedeeb162019-06-14 11:09:19 +0100829 };
830
831 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
832 workloadFactory,
833 memoryManager,
834 inputNoQuantizedValues,
835 inputTensorInfo,
836 kernelNoQuantizedValues,
837 kernelTensorInfo,
838 outputExpectedNoQuantizedValues,
839 outputTensorInfo,
840 3,
841 3,
842 layout,
843 biasEnabled);
844}
845
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100846template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
847LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
848 armnn::IWorkloadFactory &workloadFactory,
849 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
850 bool biasEnabled,
851 const armnn::DataLayout layout)
852{
853 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
854 std::vector<float> inputNoQuantizedValues =
855 {
856 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
857 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
858 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
859 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
860 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
861 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
862 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
863 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
864 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
865 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
866 };
867
868 armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
869 std::vector<float> kernelNoQuantizedValues =
870 {
871 1, 2,
872 3, 4
873 };
874
875 // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
876 // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
Jan Eilers0bf6b232019-07-12 10:46:33 +0100877 // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100878 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
879 std::vector<float> outputExpectedNoQuantizedValues =
880 {
881 4, 7, 7, 3,
882 6, 10, 10, 4,
883 6, 10, 10, 4,
884 2, 3, 3, 1
885 };
886 uint32_t padLeft = 1;
887 uint32_t padTop = 1;
888 uint32_t padRight = 1;
889 uint32_t padBottom = 1;
890
891 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
892 workloadFactory,
893 memoryManager,
894 inputNoQuantizedValues,
895 inputTensorInfo,
896 kernelNoQuantizedValues,
897 kernelTensorInfo,
898 outputExpectedNoQuantizedValues,
899 outputTensorInfo,
900 2,
901 2,
902 layout,
903 padLeft,
904 padTop,
905 padRight,
906 padBottom,
907 3,
908 3,
909 biasEnabled
910 );
911}
912
Teresa Charlinedeeb162019-06-14 11:09:19 +0100913template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
914Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
915 armnn::IWorkloadFactory&,
916 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
917 bool,
918 armnn::DataLayout);
919
920template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
921Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
922 armnn::IWorkloadFactory&,
923 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
924 bool,
925 armnn::DataLayout);
926
927template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
928Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
929 armnn::IWorkloadFactory&,
930 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
931 bool,
932 armnn::DataLayout);
933
934template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
935Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
936 armnn::IWorkloadFactory&,
937 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
938 bool,
939 armnn::DataLayout);
940
941template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
942Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
943 armnn::IWorkloadFactory&,
944 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
945 bool,
946 armnn::DataLayout);
947
948template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
949Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
950 armnn::IWorkloadFactory&,
951 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
952 bool,
953 armnn::DataLayout);
954
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100955template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
956Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
957 armnn::IWorkloadFactory &workloadFactory,
958 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
959 bool biasEnabled,
960 const armnn::DataLayout layout);
961
962template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
963Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
964 armnn::IWorkloadFactory &workloadFactory,
965 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
966 bool biasEnabled,
967 const armnn::DataLayout layout);
968
969template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
970Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
971 armnn::IWorkloadFactory &workloadFactory,
972 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
973 bool biasEnabled,
974 const armnn::DataLayout layout);
975
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000976template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
977 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000978LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
979 armnn::IWorkloadFactory& workloadFactory,
980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
981 float qScale,
982 int32_t qOffset,
983 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000984 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100985{
telsoa01c577f2c2018-08-31 09:22:23 +0100986 // Use a single-batch 2-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000987 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +0100988 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +0100989 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
990 {
surmeh013537c2c2018-05-18 16:31:43 +0100991 0, 1, 2, 3, 4,
992 5, 6, 7, 8, 9,
993 10, 11, 12, 13, 14,
994 15, 16, 17, 18, 19,
995 20, 21, 22, 23, 24,
996
997 25, 26, 27, 28, 29,
998 30, 31, 32, 33, 34,
999 35, 36, 37, 38, 39,
1000 40, 41, 42, 43, 44,
1001 45, 46, 47, 48, 49
1002 })));
1003
telsoa01c577f2c2018-08-31 09:22:23 +01001004 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001005 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +01001006 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001007 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1008 {
surmeh013537c2c2018-05-18 16:31:43 +01001009 32, 31, 30, 29,
1010 28, 27, 26, 25,
1011 24, 23, 22, 21,
1012 20, 19, 18, 17,
1013
1014 16, 15, 14, 13,
1015 12, 11, 10, 9,
1016 8, 7, 6, 5,
1017 4, 3, 2, 1
1018 })));
1019
telsoa01c577f2c2018-08-31 09:22:23 +01001020 // Expected output is 1 batch of a 2-channel 5x5 image.
1021 // Calculated using the python tensorflow library with strideX=1, strideY=1.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001022 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
surmeh013537c2c2018-05-18 16:31:43 +01001023 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001024 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1025 {
surmeh013537c2c2018-05-18 16:31:43 +01001026 1062, 1580, 1850, 1530, 1117,
1027 2140, 3108, 3500, 2842, 2042,
1028 3580, 5068, 5460, 4342, 3062,
1029 3618, 5072, 5390, 4248, 2971,
1030 3074, 4282, 4510, 3533, 2457,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001031
surmeh013537c2c2018-05-18 16:31:43 +01001032 1550, 2284, 2362, 1955, 1428,
1033 2910, 4206, 4342, 3528, 2536,
1034 3390, 4886, 5022, 4068, 2916,
1035 3566, 5056, 5182, 4133, 2922,
1036 3100, 4352, 4452, 3517, 2465
1037 })));
1038
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001039 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
1040 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001041 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01001042 input,
1043 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001044 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
surmeh013537c2c2018-05-18 16:31:43 +01001045 expectedOutput,
1046 qScale,
1047 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +01001048 layout,
telsoa01c577f2c2018-08-31 09:22:23 +01001049 1, // Padding left.
1050 1, // Padding top.
1051 2, // Padding right.
1052 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +01001053 1, // strideX
1054 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +00001055}
1056
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001057template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1058 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001059LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
1060 armnn::IWorkloadFactory& workloadFactory,
1061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1062 float qScale,
1063 int32_t qOffset,
1064 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001065{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001066 auto layout = armnn::DataLayout::NHWC;
1067
1068 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001069 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001070 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1071 {
1072 0, 1, 2, 3, 4,
1073 5, 6, 7, 8, 9,
1074 10, 11, 12, 13, 14,
1075 15, 16, 17, 18, 19,
1076 20, 21, 22, 23, 24,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001077
Teresa Charlin20b1f882019-06-19 09:34:37 +01001078 25, 26, 27, 28, 29,
1079 30, 31, 32, 33, 34,
1080 35, 36, 37, 38, 39,
1081 40, 41, 42, 43, 44,
1082 45, 46, 47, 48, 49
Nikhil Rajcec6b652018-10-12 13:51:57 +01001083 })));
1084
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001085 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001086 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001087 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1088 {
Matteo Martincigh747ef822018-12-18 09:26:39 +00001089 32, 31, 30, 29,
1090 28, 27, 26, 25,
1091 24, 23, 22, 21,
1092 20, 19, 18, 17,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001093
Matteo Martincigh747ef822018-12-18 09:26:39 +00001094 16, 15, 14, 13,
1095 12, 11, 10, 9,
1096 8, 7, 6, 5,
1097 4, 3, 2, 1
Nikhil Rajcec6b652018-10-12 13:51:57 +01001098 })));
1099
Teresa Charlin20b1f882019-06-19 09:34:37 +01001100 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001101 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001102 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1103 {
1104 1062, 1580, 1850, 1530, 1117,
1105 2140, 3108, 3500, 2842, 2042,
1106 3580, 5068, 5460, 4342, 3062,
1107 3618, 5072, 5390, 4248, 2971,
1108 3074, 4282, 4510, 3533, 2457,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001109
Teresa Charlin20b1f882019-06-19 09:34:37 +01001110 1550, 2284, 2362, 1955, 1428,
1111 2910, 4206, 4342, 3528, 2536,
1112 3390, 4886, 5022, 4068, 2916,
1113 3566, 5056, 5182, 4133, 2922,
1114 3100, 4352, 4452, 3517, 2465
Nikhil Rajcec6b652018-10-12 13:51:57 +01001115 })));
1116
Teresa Charlin20b1f882019-06-19 09:34:37 +01001117 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001118 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001119 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001120 input,
1121 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001122 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Nikhil Rajcec6b652018-10-12 13:51:57 +01001123 expectedOutput,
1124 qScale,
1125 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001126 layout,
Nikhil Rajcec6b652018-10-12 13:51:57 +01001127 1, // Padding left.
1128 1, // Padding top.
1129 2, // Padding right.
1130 2, // Padding bottom.
1131 1, // strideX
1132 1); // strideY
1133}
1134
Bruno Goncalves22972f02019-04-26 21:03:24 -03001135template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1136 typename T = armnn::ResolveType<ArmnnType>>
1137LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
1138 armnn::IWorkloadFactory& workloadFactory,
1139 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1140 float qScale,
1141 int32_t qOffset,
1142 bool biasEnabled)
1143{
Teresa Charlin20b1f882019-06-19 09:34:37 +01001144 auto layout = armnn::DataLayout::NHWC;
1145
1146 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001147 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001148 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
1149 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001150 0, 0, 0, 0, 0, 0, 0, 0, 0,
1151 0, 0, 0, 0, 0, 0, 0, 0, 0,
1152 0, 0, 0, 0, 0, 0, 0, 0, 0,
1153 0, 0, 0, 1, 1, 1, 0, 0, 0,
1154 0, 0, 0, 1, 1, 1, 0, 0, 0,
1155 0, 0, 0, 1, 1, 1, 0, 0, 0,
1156 0, 0, 0, 0, 0, 0, 0, 0, 0,
1157 0, 0, 0, 0, 0, 0, 0, 0, 0,
1158 0, 0, 0, 0, 0, 0, 0, 0, 0
1159 })));
1160
1161 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1162 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001163 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
1164 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001165 1, 2, 3,
1166 4, 5, 6,
1167 7, 8, 9
1168 })));
1169
1170 uint32_t padLeft = 0;
1171 uint32_t padTop = 0;
1172 uint32_t padRight = 0;
1173 uint32_t padBottom = 0;
1174 uint32_t strideX = 1;
1175 uint32_t strideY = 1;
1176 uint32_t dilationX = 3;
1177 uint32_t dilationY = 3;
1178
1179 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Teresa Charlin20b1f882019-06-19 09:34:37 +01001180 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001181 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001182 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1183 {
Bruno Goncalves22972f02019-04-26 21:03:24 -03001184 5, 5, 5,
1185 5, 5, 5,
1186 5, 5, 5
1187 })));
1188
Teresa Charlin20b1f882019-06-19 09:34:37 +01001189 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
Bruno Goncalves22972f02019-04-26 21:03:24 -03001190 workloadFactory,
1191 memoryManager,
1192 input,
1193 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +01001194 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
Bruno Goncalves22972f02019-04-26 21:03:24 -03001195 expectedOutput,
1196 qScale,
1197 qOffset,
Teresa Charlin20b1f882019-06-19 09:34:37 +01001198 layout,
Bruno Goncalves22972f02019-04-26 21:03:24 -03001199 padLeft,
1200 padTop,
1201 padRight,
1202 padBottom,
1203 strideX,
1204 strideY,
1205 dilationX,
1206 dilationY);
telsoa014fcda012018-03-09 14:13:49 +00001207}
1208
Teresa Charlin20b1f882019-06-19 09:34:37 +01001209
1210template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1211LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1212 armnn::IWorkloadFactory& workloadFactory,
1213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1214 const std::vector<float>& inputNoQuantizedValues,
1215 armnn::TensorInfo& inputTensorInfo,
1216 const std::vector<float>& kernelNoQuantizedValues,
1217 armnn::TensorInfo& kernelTensorInfo,
1218 const std::vector<float>& outputExpectedNoQuantizedValues,
1219 armnn::TensorInfo& outputTensorInfo,
1220 uint32_t dilationX,
1221 uint32_t dilationY,
1222 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1223 bool biasEnabled = false)
1224{
1225 float qScale;
1226 int32_t qOffset;
1227 switch (ArmnnType)
1228 {
1229 case armnn::DataType::QuantisedAsymm8:
1230 {
1231 qScale = 0.1f;
1232 qOffset = 128;
1233 break;
1234 }
1235 case armnn::DataType::QuantisedSymm16:
1236 {
1237 qScale = 0.1f;
1238 qOffset = 0;
1239 break;
1240 }
1241 case armnn::DataType::Float32:
1242 default:
1243 {
1244 qScale = 0.f;
1245 qOffset = 0;
1246 break;
1247 }
1248 }
1249
1250 inputTensorInfo.SetQuantizationScale(qScale);
1251 inputTensorInfo.SetQuantizationOffset(qOffset);
1252 kernelTensorInfo.SetQuantizationScale(qScale);
1253 kernelTensorInfo.SetQuantizationOffset(qOffset);
1254 outputTensorInfo.SetQuantizationScale(qScale);
1255 outputTensorInfo.SetQuantizationOffset(qOffset);
1256
1257 auto input = MakeTensor<T, 4>(inputTensorInfo,
1258 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1259 inputTensorInfo.GetQuantizationOffset(),
1260 inputNoQuantizedValues)));
1261 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1262 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1263 kernelTensorInfo.GetQuantizationOffset(),
1264 kernelNoQuantizedValues)));
1265 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1266 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1267 outputTensorInfo.GetQuantizationOffset(),
1268 outputExpectedNoQuantizedValues)));
1269
1270 uint32_t padLeft = 0;
1271 uint32_t padTop = 0;
1272 uint32_t padRight = 0;
1273 uint32_t padBottom = 0;
1274 uint32_t strideX = 1;
1275 uint32_t strideY = 1;
1276
1277 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1278 workloadFactory,
1279 memoryManager,
1280 input,
1281 kernel,
1282 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1283 expectedOutput,
1284 qScale,
1285 qOffset,
1286 layout,
1287 padLeft,
1288 padTop,
1289 padRight,
1290 padBottom,
1291 strideX,
1292 strideY,
1293 dilationX,
1294 dilationY);
1295}
1296
1297template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1298LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
1299 armnn::IWorkloadFactory& workloadFactory,
1300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1301 bool biasEnabled,
1302 const armnn::DataLayout layout)
1303{
1304 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
1305 std::vector<float> inputNoQuantizedValues =
1306 {
1307 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1308 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1309 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1310 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1311 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1312 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1313 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1314 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1315 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1316 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1317 };
1318
1319 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1320 std::vector<float> kernelNoQuantizedValues =
1321 {
1322 1, 2, 3,
1323 4, 5, 6,
1324 7, 8, 9
1325 };
1326
1327 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1328 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1329 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1330 std::vector<float> outputExpectedNoQuantizedValues =
1331 {
1332 6., 5., 5., 5.,
1333 6., 5., 5., 5.,
1334 6., 5., 5., 5.,
1335 3., 2., 2., 2.
1336 };
1337
1338 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1339 workloadFactory,
1340 memoryManager,
1341 inputNoQuantizedValues,
1342 inputTensorInfo,
1343 kernelNoQuantizedValues,
1344 kernelTensorInfo,
1345 outputExpectedNoQuantizedValues,
1346 outputTensorInfo,
1347 3,
1348 3,
1349 layout,
1350 biasEnabled);
1351}
1352
// Two-channel variant of the dilated depthwise test. Unlike regular convolution,
// depthwise convolution filters each channel independently, so the output here
// has two channels, each matching the single-channel expected values.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Single-batch, two-channel 10x10 input; both channels hold the same 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One 3x3 kernel per channel, both with values 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,              // dilationX
        3,              // dilationY
        layout,
        biasEnabled);
}
1428
1429
//
// Explicit template instantiations of the dilated DepthwiseConvolution2d tests
// for each data type / bias type pairing exercised by the backend unit tests.
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1471
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001472LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1473 armnn::IWorkloadFactory& workloadFactory,
1474 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1475 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001476 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001477{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001478 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001479 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001480}
1481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001482LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1483 armnn::IWorkloadFactory& workloadFactory,
1484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1485 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001486{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001487 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1488 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001489}
1490
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001491LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1492 armnn::IWorkloadFactory& workloadFactory,
1493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1494 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001495 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001496{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001497 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001498 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001499}
1500
Matthew Jacksond6a9dee2019-07-22 13:53:24 +01001501LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
1502 armnn::IWorkloadFactory& workloadFactory,
1503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1504{
1505 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
1506 auto input = MakeTensor<float, 4>(inputTensorInfo, { 1.f, 2.f, 3.f, 4.f });
1507
1508 std::vector<float> kernelData;
1509 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
1510 for (unsigned int i = 0; i < 64; ++i)
1511 {
1512 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
1513 }
1514 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
1515 auto kernel = MakeTensor<float, 4>(kernelTensorInfo, kernelData);
1516
1517 std::vector<float> expectedOutputData(64, 0.f);
1518 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
1519 auto expectedOutput = MakeTensor<float, 4>(outputTensorInfo, expectedOutputData);
1520
1521 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
1522 workloadFactory,
1523 memoryManager,
1524 input,
1525 kernel,
1526 boost::multi_array<float, 1>(),
1527 expectedOutput,
1528 0.f,
1529 0,
1530 armnn::DataLayout::NCHW);
1531}
1532
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001533LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1534 armnn::IWorkloadFactory& workloadFactory,
1535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1536 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001537 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001538{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001539 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001540 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001541}
1542
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001543LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1544 armnn::IWorkloadFactory& workloadFactory,
1545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1546 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001547 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001548{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001549 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001550 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001551}
1552
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001553LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1554 armnn::IWorkloadFactory& workloadFactory,
1555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1556 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001557 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001558{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001559 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001560 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001561}
1562
Bruno Goncalves22972f02019-04-26 21:03:24 -03001563LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1566{
1567 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001568 workloadFactory,
1569 memoryManager,
1570 0.f,
1571 0,
1572 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001573}
1574
Ruomei Yan88d44b82019-05-23 14:29:06 +01001575LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1576 armnn::IWorkloadFactory& workloadFactory,
1577 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1578 bool biasEnabled,
1579 const armnn::DataLayout layout)
1580{
1581 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1582 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1583}
1584
1585LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1586 armnn::IWorkloadFactory& workloadFactory,
1587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1588 bool biasEnabled,
1589 const armnn::DataLayout layout)
1590{
1591 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1592 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1593}
1594
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001595LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001596 armnn::IWorkloadFactory& workloadFactory,
1597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1598 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001599 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001600{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001601 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1602 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001603}
1604
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001605LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 armnn::IWorkloadFactory& refWorkloadFactory,
1609 const armnn::DataLayout layout)
1610{
1611 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1612 workloadFactory, memoryManager, refWorkloadFactory, layout);
1613}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001614
1615LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1616 armnn::IWorkloadFactory& workloadFactory,
1617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001618{
1619 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1620 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001621 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001622}
1623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001624LayerTestResult<float,4> SimpleNormalizationWithinTest(
1625 armnn::IWorkloadFactory& workloadFactory,
1626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001627{
1628 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1629 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001630 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001631}
1632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001633LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1634 armnn::IWorkloadFactory& workloadFactory,
1635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001636{
1637 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1638 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001639 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001640}
1641
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001642LayerTestResult<float,2> SimpleSoftmaxTest(
1643 armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1645 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001647 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001648}
1649
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001650LayerTestResult<float,3> Simple3dSoftmaxTest(
1651 armnn::IWorkloadFactory& workloadFactory,
1652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1653 float beta)
1654{
1655 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1656}
1657
1658LayerTestResult<float,4> Simple4dSoftmaxTest(
1659 armnn::IWorkloadFactory& workloadFactory,
1660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1661 float beta)
1662{
1663 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1664}
1665
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001666LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1667 armnn::IWorkloadFactory& workloadFactory,
1668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1669 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001670{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001671 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001672}
1673
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001674LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1675 armnn::IWorkloadFactory& workloadFactory,
1676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1677 float beta)
1678{
1679 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1680}
1681
1682LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1683 armnn::IWorkloadFactory& workloadFactory,
1684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1685 float beta)
1686{
1687 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1688}
1689
nikraj01248683f2019-05-29 16:46:50 +01001690LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1691 armnn::IWorkloadFactory& workloadFactory,
1692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1693 float beta)
1694{
1695 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1696}
1697
1698LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1699 armnn::IWorkloadFactory& workloadFactory,
1700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1701 float beta)
1702{
1703 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1704}
1705
1706LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1707 armnn::IWorkloadFactory& workloadFactory,
1708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1709 float beta)
1710{
1711 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1712}
1713
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001714LayerTestResult<float,4> CompareNormalizationTest(
1715 armnn::IWorkloadFactory& workloadFactory,
1716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1717 armnn::IWorkloadFactory& refWorkloadFactory,
1718 armnn::NormalizationAlgorithmChannel normChannel,
1719 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001720{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001721 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001722}
1723
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001724LayerTestResult<float,2> CompareSoftmaxTest(
1725 armnn::IWorkloadFactory& workloadFactory,
1726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001727 armnn::IWorkloadFactory& refWorkloadFactory,
1728 float beta)
1729{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001730 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1731 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001732}
1733
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001734LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1735 armnn::IWorkloadFactory& workloadFactory,
1736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001737 armnn::IWorkloadFactory& refWorkloadFactory,
1738 float beta)
1739{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001740 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1741 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001742}
1743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001744std::vector<LayerTestResult<float,3>> SplitterTest(
1745 armnn::IWorkloadFactory& workloadFactory,
1746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001747{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001748 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001749}
1750
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001751std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1752 armnn::IWorkloadFactory& workloadFactory,
1753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001755 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001756}
1757
Ruomei Yan25339c32019-05-28 16:48:20 +01001758std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1759 armnn::IWorkloadFactory& workloadFactory,
1760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1761{
1762 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1763}
1764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001765LayerTestResult<float, 3> CopyViaSplitterTest(
1766 armnn::IWorkloadFactory& workloadFactory,
1767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001769 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001770}
1771
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001772LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1773 armnn::IWorkloadFactory& workloadFactory,
1774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001775{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001776 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001777}
1778
Ruomei Yan25339c32019-05-28 16:48:20 +01001779LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1780 armnn::IWorkloadFactory& workloadFactory,
1781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1782{
1783 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1784}
1785
Jan Eilers38e05bd2019-06-26 13:10:09 +01001786void LstmUtilsZeroVectorTest()
1787{
1788 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1789 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1790 {2., 3., 3., 4.}));
1791
1792 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1793 {0., 0., 0., 0.}));
1794
1795 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1796}
1797
void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
{
    // Checks MeanStddevNormalization on two non-zero batch rows: each row is
    // normalised independently to zero mean / unit standard deviation.
    uint32_t batchSize = 2;
    uint32_t vecSize = 4;
    armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
              0.9f, 1.0f, 1.1f, 1.2f })); //batch 1

    // Both rows are evenly spaced, so each normalises to roughly the same
    // +/-1.3416, +/-0.4472 pattern; the small differences between the two rows
    // reflect float rounding in the reference implementation.
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
              -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1

    return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
            vecSize, batchSize, expectedOutput);
}
1814
1815void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1816{
1817 uint32_t batchSize = 2;
1818 uint32_t vecSize = 4;
1819 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1820 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1821 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1822 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1823
1824 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1825 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1826 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1827
1828 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1829 vecSize, batchSize, expectedOutput);
1830}
1831
void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
{
    // Mixes an all-zero row (batch 0) with a non-zero row (batch 1) to check
    // that each row is normalised independently: the zero row stays zero while
    // the non-zero row is scaled to zero mean / unit standard deviation.
    uint32_t batchSize = 2;
    uint32_t vecSize = 4;
    armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
              0.1f, 0.2f, 0.3f, 0.4f })); //batch 1

    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
              -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1

    return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
            vecSize, batchSize, expectedOutput);
}
1848
1849
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    // Multiplies one 29-element vector element-wise into each of 4 batch rows
    // that hold the same magnitudes with varying sign patterns, so the expected
    // values are the element-wise squares with the batch row's signs.
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
            { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
              11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
              21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
              /* batch 1 */
              -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
              -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
              -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
              /* batch 2 */
              1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
              11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
              21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
              /* batch 3 */
              -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
              -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
              -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expect output = vector * batchVector (element-wise product), e.g.
    // 1.1 * 1.1 = 1.21 for the first element of batch 0. (The original
    // comment described the accumulate variant, which this test does not use.)
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
            { /* batch 0 */
              1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
              59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
              172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
              368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
              637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
              /* batch 1 */
              -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
              -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
              -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
              -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
              -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
              /* batch 2 */
              1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
              59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
              172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
              368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
              637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
              /* batch 3 */
              -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
              -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
              -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
              -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
              -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
1909
1910
1911void LstmUtilsVectorBatchVectorAddTest()
1912{
1913 uint32_t batchSize = 2;
1914 uint32_t vecSize = 3;
1915 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1916 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1917 { 0.0f, -0.5f, 1.0f}));
1918
1919 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1920 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1921 { 1.0f, 2.0f, 3.0f, //batch 0
1922 4.0f, 5.0f, 6.0f})); //batch 1
1923
1924 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1925 { 1.0f, 1.5f, 4.0f,
1926 4.0f, 4.5f, 7.0f}));
1927
1928 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1929 vecSize, batchSize, expectedOutput);
1930}
1931
1932
telsoa01c577f2c2018-08-31 09:22:23 +01001933LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001934 armnn::IWorkloadFactory& workloadFactory,
1935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001936{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001937 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001938 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1939 { 2., 3., 3., 4. }));
1940
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001941 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001942 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1943 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1944 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001945 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001946 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001947}
1948
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Float32 LSTM with peephole connections and a projection layer (no CIFG):
    // 2 batches x 5 inputs in, 2 batches x 16 projected outputs expected.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Golden outputs produced by the reference implementation for the fixed
    // weights set up inside the test impl.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
            workloadFactory, memoryManager, input, expectedOutput);
}
1970
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001971LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1972 armnn::IWorkloadFactory& workloadFactory,
1973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001975 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001976 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1977 {2., 3., 3., 4.}));
1978
1979
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001980 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001981 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1982 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1983 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1984
Conor Kennedyb9971c92019-05-07 07:14:23 +01001985 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001986 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001987}
1988
Jan Eilers38e05bd2019-06-26 13:10:09 +01001989
1990LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1991 armnn::IWorkloadFactory& workloadFactory,
1992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1993{
1994 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1995 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1996 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
1997 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
1998
1999 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
2000 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
2001 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
2002 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
2003 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
2004 workloadFactory, memoryManager, input, expectedOutput);
2005}
2006
2007
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 variant of the basic (no CIFG / no peephole / no projection)
    // LSTM test. Identity quantisation for activations; weights/constants are
    // quantised as QAsymm8.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>{2., 3., 3., 4.}));

    // Expected values are the Float32 golden outputs, quantised to int16.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
2031
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 variant of the CIFG + peephole, no-projection LSTM test.
    // Identity quantisation for activations; weights/constants are QAsymm8.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>({ 2., 3., 3., 4. })));

    // Expected values are the Float32 golden outputs, quantised to int16.
    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
         -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2055
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // QSymm16 variant of the peephole + projection (no CIFG) LSTM test.
    // Scale 2.0 here (unlike the other int16 LSTM tests, which use 1.0);
    // weights/constants are QAsymm8.
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Expected values are the Float32 golden outputs, quantised to int16.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2085
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Same configuration as LstmLayerInt16NoCifgNoPeepholeNoProjectionTest,
    // except the constants (weights) are also QSymm16 instead of QAsymm8.
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>{2., 3., 3., 4.}));

    // Expected values are the Float32 golden outputs, quantised to int16.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
2107
Jim Flynn4ed6c832019-05-20 11:02:46 +01002108LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002109 armnn::IWorkloadFactory& workloadFactory,
2110 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002111{
surmeh013537c2c2018-05-18 16:31:43 +01002112 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00002113 unsigned int outputHeight = 6;
2114 unsigned int outputChannels = 3;
2115
surmeh013537c2c2018-05-18 16:31:43 +01002116 unsigned int inputWidth1 = 3;
2117 unsigned int inputHeight1 = 6;
2118 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00002119
surmeh013537c2c2018-05-18 16:31:43 +01002120 unsigned int inputWidth2 = 3;
2121 unsigned int inputHeight2 = 6;
2122 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00002123
telsoa01c577f2c2018-08-31 09:22:23 +01002124 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00002125 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
2126 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
2127 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00002128
2129 LayerTestResult<float,3> ret(outputTensorInfo);
2130
telsoa014fcda012018-03-09 14:13:49 +00002131 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01002132 {
2133 1.0f, 2.0f, 3.0f,
2134 4.0f, 5.0f, 6.0f,
2135 7.0f, 8.0f, 9.0f,
2136 10.0f, 11.0f, 12.0f,
2137 13.0f, 14.0f, 15.0f,
2138 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00002139
surmeh013537c2c2018-05-18 16:31:43 +01002140 19.0f, 20.0f, 21.0f,
2141 22.0f, 23.0f, 24.0f,
2142 25.0f, 26.0f, 27.0f,
2143 28.0f, 29.0f, 30.0f,
2144 31.0f, 32.0f, 33.0f,
2145 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00002146
surmeh013537c2c2018-05-18 16:31:43 +01002147 37.0f, 38.0f, 39.0f,
2148 40.0f, 41.0f, 42.0f,
2149 43.0f, 44.0f, 45.0f,
2150 46.0f, 47.0f, 48.0f,
2151 49.0f, 50.0f, 51.0f,
2152 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00002153 })
2154 );
2155
telsoa014fcda012018-03-09 14:13:49 +00002156 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2157 {
surmeh013537c2c2018-05-18 16:31:43 +01002158 1.0f, 2.0f, 3.0f,
2159 4.0f, 5.0f, 6.0f,
2160 7.0f, 8.0f, 9.0f,
2161 10.0f, 11.0f, 12.0f,
2162 13.0f, 14.0f, 15.0f,
2163 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00002164
surmeh013537c2c2018-05-18 16:31:43 +01002165 19.0f, 20.0f, 21.0f,
2166 22.0f, 23.0f, 24.0f,
2167 25.0f, 26.0f, 27.0f,
2168 28.0f, 29.0f, 30.0f,
2169 31.0f, 32.0f, 33.0f,
2170 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00002171 })
2172 );
2173
2174 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2175 {
surmeh013537c2c2018-05-18 16:31:43 +01002176 37.0f, 38.0f, 39.0f,
2177 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00002178 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01002179 46.0f, 47.0f, 48.0f,
2180 49.0f, 50.0f, 51.0f,
2181 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00002182 })
2183 );
2184
telsoa01c577f2c2018-08-31 09:22:23 +01002185 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01002186 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00002187
telsoa01c577f2c2018-08-31 09:22:23 +01002188 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01002189 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00002190
telsoa014fcda012018-03-09 14:13:49 +00002191 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2192
2193 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2194
2195 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
2196 subTensorsSupported ?
2197 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2198 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2199
2200 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
2201 subTensorsSupported ?
2202 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2203 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2204
Jim Flynne242f2d2019-05-22 14:24:13 +01002205 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00002206 armnn::WorkloadInfo info;
2207 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2208 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00002209 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2210
2211 data.m_ViewOrigins.push_back(window1);
2212 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00002213
Jim Flynn4ed6c832019-05-20 11:02:46 +01002214 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00002215
2216 inputHandle1->Allocate();
2217 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00002218 outputHandle->Allocate();
2219
2220 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2221 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00002222
Derek Lambertif30f7d32019-04-09 10:25:02 +01002223 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002224 workload->Execute();
2225
2226 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2227
2228 return ret;
2229}
2230
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002231LayerTestResult<float,4> AdditionTest(
2232 armnn::IWorkloadFactory& workloadFactory,
2233 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002234{
2235 unsigned int batchSize = 2;
2236 unsigned int channels = 2;
2237 unsigned int height = 2;
2238 unsigned int width = 3;
2239
2240 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2241 armnn::TensorInfo outputTensorInfo;
2242
2243 unsigned int shape[] = {batchSize, channels, height, width};
2244
2245 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2246 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2247 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2248
2249
2250 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2251 {
2252 0.0f, 2.0f, 1.0f,
2253 0.2f, 1.0f, 2.0f,
2254
2255 1.0f, 2.0f, 1.0f,
2256 0.2f, 1.0f, 2.0f,
2257
2258 0.0f, 2.0f, 1.0f,
2259 4.2f, 1.0f, 2.0f,
2260
2261 0.0f, 0.0f, 1.0f,
2262 0.2f, 1.0f, 2.0f,
2263 }));
2264
2265 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2266 {
2267 1.0f, 2.0f, 1.0f,
2268 0.0f, 1.0f, 2.0f,
2269
2270 1.0f, 2.0f, -2.0f,
2271 0.2f, 1.0f, 2.0f,
2272
2273 0.0f, 2.0f, 1.0f,
2274 4.2f, 0.0f, -3.0f,
2275
2276 0.0f, 0.0f, 1.0f,
2277 0.7f, 1.0f, 5.0f,
2278 }));
2279
2280 LayerTestResult<float,4> ret(outputTensorInfo);
2281 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2282 {
2283 1.0f, 4.0f, 2.0f,
2284 0.2f, 2.0f, 4.0f,
2285
2286 2.0f, 4.0f, -1.0f,
2287 0.4f, 2.0f, 4.0f,
2288
2289 0.0f, 4.0f, 2.0f,
2290 8.4f, 1.0f, -1.0f,
2291
2292 0.0f, 0.0f, 2.0f,
2293 0.9f, 2.0f, 7.0f,
2294 }));
2295
2296 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2297 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2298 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2299
2300 armnn::AdditionQueueDescriptor data;
2301 armnn::WorkloadInfo info;
2302 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2303 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2304 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2305
2306 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2307
2308 inputHandle1->Allocate();
2309 inputHandle2->Allocate();
2310 outputHandle->Allocate();
2311
2312 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2313 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2314
Derek Lambertif30f7d32019-04-09 10:25:02 +01002315 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002316 workload->Execute();
2317
2318 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2319
2320 return ret;
2321}
2322
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002323template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002324LayerTestResult<T, 4> AdditionBroadcastTestImpl(
2325 armnn::IWorkloadFactory& workloadFactory,
2326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002327 float qScale,
2328 int32_t qOffset)
2329{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002330 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
2331 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
2332 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002333
2334 if (armnn::IsQuantizedType<T>())
2335 {
2336 inputTensorInfo1.SetQuantizationScale(qScale);
2337 inputTensorInfo1.SetQuantizationOffset(qOffset);
2338 inputTensorInfo2.SetQuantizationScale(qScale);
2339 inputTensorInfo2.SetQuantizationOffset(qOffset);
2340 outputTensorInfo.SetQuantizationScale(qScale);
2341 outputTensorInfo.SetQuantizationOffset(qOffset);
2342 }
2343
2344 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2345 {
2346 0.0f,
2347 1.0f,
2348
2349 2.0f,
2350 3.0f,
2351
2352 4.0f,
2353 5.0f,
2354 }));
2355
2356 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2357 {
2358 0.5f, 1.5f, 2.5f,
2359 3.5f, 4.5f, 5.5f,
2360 }));
2361
2362 LayerTestResult<T,4> ret(outputTensorInfo);
2363 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2364 {
2365 0.5f, 1.5f, 2.5f,
2366 4.5f, 5.5f, 6.5f,
2367
2368 2.5f, 3.5f, 4.5f,
2369 6.5f, 7.5f, 8.5f,
2370
2371 4.5f, 5.5f, 6.5f,
2372 8.5f, 9.5f, 10.5f,
2373 }));
2374
2375 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2376 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2377 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2378
2379 armnn::AdditionQueueDescriptor data;
2380 armnn::WorkloadInfo info;
2381 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2382 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2383 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2384
2385 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2386
2387 inputHandle1->Allocate();
2388 inputHandle2->Allocate();
2389 outputHandle->Allocate();
2390
2391 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2392 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2393
Derek Lambertif30f7d32019-04-09 10:25:02 +01002394 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002395 workload->Execute();
2396
2397 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2398
2399 return ret;
2400}
2401
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002402template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002403LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
2404 armnn::IWorkloadFactory& workloadFactory,
2405 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00002406 float qScale,
2407 int32_t qOffset)
2408{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002409 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
2410 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
2411 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00002412
2413 if (armnn::IsQuantizedType<T>())
2414 {
2415 inputTensorInfo1.SetQuantizationScale(qScale);
2416 inputTensorInfo1.SetQuantizationOffset(qOffset);
2417 inputTensorInfo2.SetQuantizationScale(qScale);
2418 inputTensorInfo2.SetQuantizationOffset(qOffset);
2419 outputTensorInfo.SetQuantizationScale(qScale);
2420 outputTensorInfo.SetQuantizationOffset(qOffset);
2421 }
2422
2423 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
2424 {
2425 0.0f, 1.0f, 2.0f,
2426 3.0f, 4.0f, 5.0f,
2427 6.0f, 7.0f, 8.0f,
2428 9.0f, 10.0f, 11.0f,
2429 12.0f, 13.0f, 14.0f,
2430 15.0f, 16.0f, 17.0f,
2431 }));
2432
2433 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
2434 {
2435 0.5f,
2436 }));
2437
2438 LayerTestResult<T,4> ret(outputTensorInfo);
2439 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
2440 {
2441 0.5f, 1.5f, 2.5f,
2442 3.5f, 4.5f, 5.5f,
2443 6.5f, 7.5f, 8.5f,
2444 9.5f, 10.5f, 11.5f,
2445 12.5f, 13.5f, 14.5f,
2446 15.5f, 16.5f, 17.5f,
2447 }));
2448
2449 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2450 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2451 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2452
2453 armnn::AdditionQueueDescriptor data;
2454 armnn::WorkloadInfo info;
2455 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2456 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2457 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2458
2459 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2460
2461 inputHandle1->Allocate();
2462 inputHandle2->Allocate();
2463 outputHandle->Allocate();
2464
2465 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2466 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2467
Derek Lambertif30f7d32019-04-09 10:25:02 +01002468 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002469 workload->Execute();
2470
2471 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2472
2473 return ret;
2474}
2475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002476LayerTestResult<float, 4> AdditionBroadcastTest(
2477 armnn::IWorkloadFactory& workloadFactory,
2478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002479{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002480 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2481 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002482}
2483
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002484LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2485 armnn::IWorkloadFactory& workloadFactory,
2486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002487{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002488 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2489 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002490}
2491
Sadik Armagan2999a022019-04-09 14:20:12 +01002492LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2493 armnn::IWorkloadFactory& workloadFactory,
2494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2495{
2496 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2497 workloadFactory, memoryManager, 2.f, 0);
2498}
2499
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002500LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2501 armnn::IWorkloadFactory& workloadFactory,
2502 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002503{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002504 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2505 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002506}
2507
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002508LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2509 armnn::IWorkloadFactory& workloadFactory,
2510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002511{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002512 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2513 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002514}
2515
Sadik Armagan2999a022019-04-09 14:20:12 +01002516LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2517 armnn::IWorkloadFactory& workloadFactory,
2518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2519{
2520 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2521 workloadFactory, memoryManager, 0.1333333f, 0);
2522}
2523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002524LayerTestResult<float,4> CompareAdditionTest(
2525 armnn::IWorkloadFactory& workloadFactory,
2526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2527 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002528{
2529 unsigned int batchSize = 4;
2530 unsigned int channels = 1;
2531 unsigned int height = 2;
2532 unsigned int width = 3;
2533
2534 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2535 armnn::TensorInfo outputTensorInfo;
2536
2537 unsigned int shape[] = {batchSize, channels, height, width};
2538
2539 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2540 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2541 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2542
2543 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
2544 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
2545
2546 LayerTestResult<float,4> ret(outputTensorInfo);
2547
2548 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2549 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2550 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2551
2552 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2553 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
2554 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2555
2556 armnn::AdditionQueueDescriptor data;
2557 armnn::WorkloadInfo info;
2558 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2559 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2560 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2561
2562 armnn::AdditionQueueDescriptor refData = data;
2563 armnn::WorkloadInfo refInfo = info;
2564 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
2565 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
2566 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2567
2568 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2569 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
2570
2571 inputHandle1->Allocate();
2572 inputHandle2->Allocate();
2573 outputHandle->Allocate();
2574 inputHandle1Ref->Allocate();
2575 inputHandle2Ref->Allocate();
2576 outputHandleRef->Allocate();
2577
2578 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2579 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2580 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2581 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
2582
Derek Lambertif30f7d32019-04-09 10:25:02 +01002583 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002584 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01002585 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002586 workloadRef->Execute();
2587
2588 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2589 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2590
2591 return ret;
2592}
2593
surmeh01bceff2f2018-03-29 16:29:27 +01002594namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01002595template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002596LayerTestResult<T, 4> DivisionTestHelper(
2597 armnn::IWorkloadFactory& workloadFactory,
2598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2599 const unsigned int shape0[4],
2600 const std::vector<T>& values0,
2601 float scale0,
2602 int32_t offset0,
2603 const unsigned int shape1[4],
2604 const std::vector<T> & values1,
2605 float scale1,
2606 int32_t offset1,
2607 const unsigned int outShape[4],
2608 const std::vector<T> & outValues,
2609 float outScale,
2610 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01002611{
Sadik Armagan2999a022019-04-09 14:20:12 +01002612 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
2613 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
2614 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002615
David Beck5cd01f32018-09-12 16:00:08 +01002616 inputTensorInfo0.SetQuantizationScale(scale0);
2617 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002618
David Beck5cd01f32018-09-12 16:00:08 +01002619 inputTensorInfo1.SetQuantizationScale(scale1);
2620 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002621
David Beck5cd01f32018-09-12 16:00:08 +01002622 outputTensorInfo.SetQuantizationScale(outScale);
2623 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002624
David Beck5cd01f32018-09-12 16:00:08 +01002625 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
2626 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002627
David Beck5cd01f32018-09-12 16:00:08 +01002628 LayerTestResult<T, 4> result(outputTensorInfo);
2629 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002630
David Beck5cd01f32018-09-12 16:00:08 +01002631 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2632 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2633 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002634
David Beck5cd01f32018-09-12 16:00:08 +01002635 armnn::DivisionQueueDescriptor data;
2636 armnn::WorkloadInfo info;
2637 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2638 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2639 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002640
David Beck5cd01f32018-09-12 16:00:08 +01002641 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002642
David Beck5cd01f32018-09-12 16:00:08 +01002643 inputHandle0->Allocate();
2644 inputHandle1->Allocate();
2645 outputHandle->Allocate();
2646
2647 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2648 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2649
Derek Lambertif30f7d32019-04-09 10:25:02 +01002650 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01002651 workload->Execute();
2652
2653 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
2654
2655 return result;
2656}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002657} // anonymous namespace
2658
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002659LayerTestResult<float,4> DivisionByZeroTest(
2660 armnn::IWorkloadFactory& workloadFactory,
2661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002662{
2663 const unsigned int width = 2;
2664 const unsigned int height = 2;
2665 const unsigned int channelCount = 2;
2666 const unsigned int batchSize = 2;
2667
2668 unsigned int shape[] = { batchSize, channelCount, height, width };
2669
2670 std::vector<float> input0({
2671 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2672 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2673
2674 std::vector<float> input1({
2675 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2676 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2677
2678 std::vector<float> output({
2679 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2680 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2681
Sadik Armagan2999a022019-04-09 14:20:12 +01002682 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2683 memoryManager,
2684 shape, input0, 1.0f, 0,
2685 shape, input1, 1.0f, 0,
2686 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002687}
2688
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002689LayerTestResult<float,4> DivisionTest(
2690 armnn::IWorkloadFactory& workloadFactory,
2691 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002692{
2693 const unsigned int width = 2;
2694 const unsigned int height = 2;
2695 const unsigned int channelCount = 2;
2696 const unsigned int batchSize = 2;
2697
2698 unsigned int shape[] = { batchSize, channelCount, height, width };
2699
2700 std::vector<float> input0({
2701 2, 2, 2, 2, 3, 3, 3, 3,
2702 4, 4, 4, 4, 5, 5, 5, 5 });
2703
2704 std::vector<float> input1({
2705 1, 1, 1, 1, 2, 2, 2, 2,
2706 4, 4, 4, 4, 4, 4, 4, 4 });
2707
2708 std::vector<float> output({
2709 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2710 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2711
David Beck5cd01f32018-09-12 16:00:08 +01002712
Sadik Armagan2999a022019-04-09 14:20:12 +01002713 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2714 memoryManager,
2715 shape, input0, 1.0f, 0,
2716 shape, input1, 1.0f, 0,
2717 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002718}
2719
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002720LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2721 armnn::IWorkloadFactory& workloadFactory,
2722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002723{
2724 unsigned int shape0[] = { 1, 2, 2, 2 };
2725 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2726
2727 unsigned int shape1[] = { 1, 1, 1, 1 };
2728 std::vector<float> input1({ 2 });
2729
2730 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2731
David Beck5cd01f32018-09-12 16:00:08 +01002732
Sadik Armagan2999a022019-04-09 14:20:12 +01002733 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2734 memoryManager,
2735 shape0, input0, 1.0f, 0,
2736 shape1, input1, 1.0f, 0,
2737 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002738}
2739
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002740LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2741 armnn::IWorkloadFactory& workloadFactory,
2742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002743{
2744 unsigned int shape0[] = { 1, 3, 3, 2 };
2745 std::vector<float> input0({
2746 1, 4, 3, 8, 5, 12,
2747 7, 16, 9, 20, 11, 24,
2748 13, 28, 15, 32, 17, 36});
2749
2750 unsigned int shape1[] = { 1, 1, 1, 2 };
2751 std::vector<float> input1({ 1, 2 });
2752
2753 std::vector<float> output({
2754 1, 2, 3, 4, 5, 6,
2755 7, 8, 9, 10, 11, 12,
2756 13, 14, 15, 16, 17, 18});
2757
Sadik Armagan2999a022019-04-09 14:20:12 +01002758 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2759 memoryManager,
2760 shape0, input0, 1.0f, 0,
2761 shape1, input1, 1.0f, 0,
2762 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002763}
2764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002765LayerTestResult<uint8_t,4> DivisionUint8Test(
2766 armnn::IWorkloadFactory& workloadFactory,
2767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002768{
2769 const unsigned int width = 2;
2770 const unsigned int height = 2;
2771 const unsigned int channelCount = 2;
2772 const unsigned int batchSize = 2;
2773
2774 unsigned int shape[] = { batchSize, channelCount, height, width };
2775
2776 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2777 4, 4, 4, 4, 5, 5, 5, 5 });
2778
2779 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2780 4, 4, 4, 4, 4, 4, 4, 4 });
2781
2782 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2783 4, 4, 4, 4, 5, 5, 5, 5});
2784
2785
Sadik Armagan2999a022019-04-09 14:20:12 +01002786 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2787 memoryManager,
2788 shape, input0, 1.0f, 0,
2789 shape, input1, 1.0f, 0,
2790 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002791}
2792
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002793LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2794 armnn::IWorkloadFactory& workloadFactory,
2795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002796{
2797 unsigned int shape0[] = { 1, 2, 2, 2 };
2798 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2799
2800 unsigned int shape1[] = { 1, 1, 1, 1 };
2801 std::vector<uint8_t> input1({ 2 });
2802
2803 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2804
Sadik Armagan2999a022019-04-09 14:20:12 +01002805 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2806 memoryManager,
2807 shape0, input0, 1.0f, 0,
2808 shape1, input1, 1.0f, 0,
2809 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002810}
2811
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002812LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2813 armnn::IWorkloadFactory& workloadFactory,
2814 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002815{
2816 unsigned int shape0[] = { 1, 3, 3, 2 };
2817 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2818 7, 16, 9, 20, 11, 24,
2819 13, 28, 15, 32, 17, 36});
2820
2821 unsigned int shape1[] = { 1, 1, 1, 2 };
2822 std::vector<uint8_t> input1({ 1, 2 });
2823
2824 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2825 7, 8, 9, 10, 11, 12,
2826 13, 14, 15, 16, 17, 18});
2827
Sadik Armagan2999a022019-04-09 14:20:12 +01002828 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2829 memoryManager,
2830 shape0, input0, 1.0f, 0,
2831 shape1, input1, 1.0f, 0,
2832 shape0, output, 1.0f, 0);
2833}
2834
2835LayerTestResult<int16_t,4> DivisionInt16Test(
2836 armnn::IWorkloadFactory& workloadFactory,
2837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2838{
2839 unsigned int shape[] = { 2, 2, 2, 2 };
2840
2841 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2842 4, 4, 4, 4, 5, 5, 5, 5 });
2843
2844 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2845 4, 4, 4, 4, 4, 4, 4, 4 });
2846
2847 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2848 4, 4, 4, 4, 5, 5, 5, 5});
2849
2850
2851 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2852 memoryManager,
2853 shape, input0, 1.0f, 0,
2854 shape, input1, 1.0f, 0,
2855 shape, output, 0.25f, 0);
2856}
2857
2858LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2859 armnn::IWorkloadFactory& workloadFactory,
2860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2861{
2862 unsigned int shape0[] = { 1, 2, 2, 2 };
2863 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2864
2865 unsigned int shape1[] = { 1, 1, 1, 1 };
2866 std::vector<int16_t> input1({ 2 });
2867
2868 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2869
2870 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2871 memoryManager,
2872 shape0, input0, 1.0f, 0,
2873 shape1, input1, 1.0f, 0,
2874 shape0, output, 1.0f, 0);
2875}
2876
2877LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2878 armnn::IWorkloadFactory& workloadFactory,
2879 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2880{
2881 unsigned int shape0[] = { 1, 3, 3, 2 };
2882 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2883 7, 16, 9, 20, 11, 24,
2884 13, 28, 15, 32, 17, 36});
2885
2886 unsigned int shape1[] = { 1, 1, 1, 2 };
2887 std::vector<int16_t> input1({ 1, 2 });
2888
2889 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2890 7, 8, 9, 10, 11, 12,
2891 13, 14, 15, 16, 17, 18});
2892
2893 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2894 memoryManager,
2895 shape0, input0, 1.0f, 0,
2896 shape1, input1, 1.0f, 0,
2897 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002898}
2899
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002900template<typename DescriptorType>
2901std::unique_ptr<armnn::IWorkload> CreateWorkload(
2902 const armnn::IWorkloadFactory& workloadFactory,
2903 const armnn::WorkloadInfo& info,
2904 const DescriptorType& descriptor)
2905{
2906 return CreateWorkload(workloadFactory, info, descriptor);
2907};
2908
2909template<>
2910std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2911 const armnn::IWorkloadFactory& workloadFactory,
2912 const armnn::WorkloadInfo& info,
2913 const armnn::MaximumQueueDescriptor& descriptor)
2914{
2915 return workloadFactory.CreateMaximum(descriptor, info);
2916}
2917
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002918template<>
2919std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2920 const armnn::IWorkloadFactory& workloadFactory,
2921 const armnn::WorkloadInfo& info,
2922 const armnn::MinimumQueueDescriptor& descriptor)
2923{
2924 return workloadFactory.CreateMinimum(descriptor, info);
2925}
2926
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002927template<>
2928std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2929 const armnn::IWorkloadFactory& workloadFactory,
2930 const armnn::WorkloadInfo& info,
2931 const armnn::EqualQueueDescriptor& descriptor)
2932{
2933 return workloadFactory.CreateEqual(descriptor, info);
2934}
2935
FrancisMurtagh878f0232018-12-19 10:56:15 +00002936template<>
2937std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2938 const armnn::IWorkloadFactory& workloadFactory,
2939 const armnn::WorkloadInfo& info,
2940 const armnn::GreaterQueueDescriptor& descriptor)
2941{
2942 return workloadFactory.CreateGreater(descriptor, info);
2943}
2944
namespace {

// Shared driver for the binary element-wise layer tests below (Maximum,
// Minimum, Equal, Greater). It wraps the two inputs and the expected output
// in tensors, creates the workload selected by Descriptor via
// CreateWorkload<Descriptor>, executes it on the given factory, and returns
// both the actual and the expected result for the caller to compare.
//
//   Descriptor       - armnn queue-descriptor type selecting the operation.
//   ArmnnTypeInput   - armnn::DataType of both input tensors.
//   ArmnnTypeOutput  - armnn::DataType of the output tensor (Boolean for the
//                      comparison operations).
//   TInput / TOutput - host storage types resolved from the armnn types.
//   qScale / qOffset - quantization parameters; applied to all three tensor
//                      infos, but only when TInput is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    // Every test in this family uses rank-4 tensors.
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization info is only meaningful for quantized input types; float
    // tests leave the default qScale/qOffset (0.0f / 0) untouched.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Comparison ops emit Boolean data; flag the result so actual/expected
    // are compared as booleans rather than raw values.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure runs after the handles are allocated and the
    // input data is in place, immediately before execution.
    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for operations whose output type equals the input type
// (Maximum, Minimum): forwards with ArmnnTypeOutput = ArmnnT.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
3036
3037LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003039{
3040 const unsigned int width = 2;
3041 const unsigned int height = 2;
3042 const unsigned int channelCount = 2;
3043 const unsigned int batchSize = 2;
3044
3045 unsigned int shape[] = { batchSize, channelCount, height, width };
3046
3047 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3048 3, 3, 3, 3, 4, 4, 4, 4 });
3049
3050 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3051 5, 5, 5, 5, 4, 4, 4, 4 });
3052
kevmay012b4d88e2019-01-24 14:05:09 +00003053 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3054 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003055
kevmay012b4d88e2019-01-24 14:05:09 +00003056 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003057 workloadFactory,
3058 memoryManager,
3059 shape,
3060 input0,
3061 shape,
3062 input1,
3063 shape,
3064 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003065}
3066
kevmay012b4d88e2019-01-24 14:05:09 +00003067LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003068 armnn::IWorkloadFactory& workloadFactory,
3069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3070{
3071 unsigned int shape0[] = { 1, 2, 2, 2 };
3072 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3073
3074 unsigned int shape1[] = { 1, 1, 1, 1 };
3075 std::vector<float> input1({ 1 });
3076
kevmay012b4d88e2019-01-24 14:05:09 +00003077 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003078
kevmay012b4d88e2019-01-24 14:05:09 +00003079 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003080 workloadFactory,
3081 memoryManager,
3082 shape0,
3083 input0,
3084 shape1,
3085 input1,
3086 shape0,
3087 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003088}
3089
kevmay012b4d88e2019-01-24 14:05:09 +00003090LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003091 armnn::IWorkloadFactory& workloadFactory,
3092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3093{
3094 const unsigned int shape0[] = { 1, 2, 2, 3 };
3095 const unsigned int shape1[] = { 1, 1, 1, 3 };
3096
3097 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3098 7, 8, 9, 10, 11, 12 });
3099
3100 std::vector<float> input1({ 1, 2, 3});
3101
kevmay012b4d88e2019-01-24 14:05:09 +00003102 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3103 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003104
kevmay012b4d88e2019-01-24 14:05:09 +00003105 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003106 workloadFactory,
3107 memoryManager,
3108 shape0,
3109 input0,
3110 shape1,
3111 input1,
3112 shape0,
3113 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003114}
3115
3116LayerTestResult<uint8_t, 4> EqualUint8Test(
3117 armnn::IWorkloadFactory& workloadFactory,
3118 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3119{
3120 unsigned int shape[] = { 2, 2, 2, 2 };
3121
3122 // See dequantized values to the right.
3123 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003124 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003125
3126 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3127 3, 3, 3, 3, 5, 5, 5, 5 });
3128
3129 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3130 1, 1, 1, 1, 0, 0, 0, 0 });
3131
kevmay012b4d88e2019-01-24 14:05:09 +00003132 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3133 armnn::DataType::QuantisedAsymm8,
3134 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003135 workloadFactory,
3136 memoryManager,
3137 shape,
3138 input0,
3139 shape,
3140 input1,
3141 shape,
3142 output,
3143 1.0f,
3144 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003145}
3146
3147LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3148 armnn::IWorkloadFactory& workloadFactory,
3149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3150{
3151 const unsigned int shape0[] = { 1, 2, 2, 3 };
3152 const unsigned int shape1[] = { 1, 1, 1, 1 };
3153
3154 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3155 7, 8, 9, 10, 11, 12 });
3156
3157 std::vector<uint8_t> input1({ 1 });
3158
3159 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3160 0, 0, 0, 0, 0, 0 });
3161
kevmay012b4d88e2019-01-24 14:05:09 +00003162 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3163 armnn::DataType::QuantisedAsymm8,
3164 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003165 workloadFactory,
3166 memoryManager,
3167 shape0,
3168 input0,
3169 shape1,
3170 input1,
3171 shape0,
3172 output,
3173 1.0f,
3174 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003175}
3176
3177LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3178 armnn::IWorkloadFactory& workloadFactory,
3179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3180{
3181 const unsigned int shape0[] = { 1, 2, 2, 3 };
3182 const unsigned int shape1[] = { 1, 1, 1, 3 };
3183
3184 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3185 7, 8, 9, 10, 11, 12 });
3186
3187 std::vector<uint8_t> input1({ 1, 1, 3});
3188
3189 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3190 0, 0, 0, 0, 0, 0 });
3191
kevmay012b4d88e2019-01-24 14:05:09 +00003192 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3193 armnn::DataType::QuantisedAsymm8,
3194 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003195 workloadFactory,
3196 memoryManager,
3197 shape0,
3198 input0,
3199 shape1,
3200 input1,
3201 shape0,
3202 output,
3203 1.0f,
3204 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003205}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003206
kevmay012b4d88e2019-01-24 14:05:09 +00003207LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3209{
3210 const unsigned int width = 2;
3211 const unsigned int height = 2;
3212 const unsigned int channelCount = 2;
3213 const unsigned int batchSize = 2;
3214
3215 unsigned int shape[] = { batchSize, channelCount, height, width };
3216
3217 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3218 3, 3, 3, 3, 4, 4, 4, 4 });
3219
3220 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3221 5, 5, 5, 5, 4, 4, 4, 4 });
3222
kevmay012b4d88e2019-01-24 14:05:09 +00003223 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3224 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003225
kevmay012b4d88e2019-01-24 14:05:09 +00003226 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003227 workloadFactory,
3228 memoryManager,
3229 shape,
3230 input0,
3231 shape,
3232 input1,
3233 shape,
3234 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003235}
3236
kevmay012b4d88e2019-01-24 14:05:09 +00003237LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003238 armnn::IWorkloadFactory& workloadFactory,
3239 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3240{
3241 unsigned int shape0[] = { 1, 2, 2, 2 };
3242 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3243
3244 unsigned int shape1[] = { 1, 1, 1, 1 };
3245 std::vector<float> input1({ 1 });
3246
kevmay012b4d88e2019-01-24 14:05:09 +00003247 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003248
kevmay012b4d88e2019-01-24 14:05:09 +00003249 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003250 workloadFactory,
3251 memoryManager,
3252 shape0,
3253 input0,
3254 shape1,
3255 input1,
3256 shape0,
3257 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003258}
3259
kevmay012b4d88e2019-01-24 14:05:09 +00003260LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003261 armnn::IWorkloadFactory& workloadFactory,
3262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3263{
3264 const unsigned int shape0[] = { 1, 2, 2, 3 };
3265 const unsigned int shape1[] = { 1, 1, 1, 3 };
3266
3267 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3268 7, 8, 9, 10, 11, 12 });
3269
3270 std::vector<float> input1({ 1, 3, 2});
3271
kevmay012b4d88e2019-01-24 14:05:09 +00003272 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3273 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003274
kevmay012b4d88e2019-01-24 14:05:09 +00003275 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003276 workloadFactory,
3277 memoryManager,
3278 shape0,
3279 input0,
3280 shape1,
3281 input1,
3282 shape0,
3283 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003284}
3285
3286LayerTestResult<uint8_t, 4> GreaterUint8Test(
3287 armnn::IWorkloadFactory& workloadFactory,
3288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3289{
3290 unsigned int shape[] = { 2, 2, 2, 2 };
3291
3292 // See dequantized values to the right.
3293 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3294 3, 3, 3, 3, 5, 5, 5, 5 });
3295
3296 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3297 2, 2, 2, 2, 5, 5, 5, 5 });
3298
3299 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3300 1, 1, 1, 1, 0, 0, 0, 0 });
3301
kevmay012b4d88e2019-01-24 14:05:09 +00003302 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3303 armnn::DataType::QuantisedAsymm8,
3304 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003305 workloadFactory,
3306 memoryManager,
3307 shape,
3308 input0,
3309 shape,
3310 input1,
3311 shape,
3312 output,
3313 1.0f,
3314 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003315}
3316
3317LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3318 armnn::IWorkloadFactory& workloadFactory,
3319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3320{
3321 const unsigned int shape0[] = { 1, 2, 2, 3 };
3322 const unsigned int shape1[] = { 1, 1, 1, 1 };
3323
3324 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3325 7, 8, 9, 10, 11, 12 });
3326
3327 std::vector<uint8_t> input1({ 1 });
3328
3329 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3330 1, 1, 1, 1, 1, 1 });
3331
kevmay012b4d88e2019-01-24 14:05:09 +00003332 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3333 armnn::DataType::QuantisedAsymm8,
3334 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003335 workloadFactory,
3336 memoryManager,
3337 shape0,
3338 input0,
3339 shape1,
3340 input1,
3341 shape0,
3342 output,
3343 1.0f,
3344 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003345}
3346
3347LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3348 armnn::IWorkloadFactory& workloadFactory,
3349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3350{
3351 const unsigned int shape0[] = { 1, 2, 2, 3 };
3352 const unsigned int shape1[] = { 1, 1, 1, 3 };
3353
3354 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3355 7, 8, 9, 10, 11, 12 });
3356
3357 std::vector<uint8_t> input1({ 1, 1, 3});
3358
3359 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3360 1, 1, 1, 1, 1, 1 });
3361
kevmay012b4d88e2019-01-24 14:05:09 +00003362 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3363 armnn::DataType::QuantisedAsymm8,
3364 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003365 workloadFactory,
3366 memoryManager,
3367 shape0,
3368 input0,
3369 shape1,
3370 input1,
3371 shape0,
3372 output,
3373 1.0f,
3374 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003375}
3376
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003377LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3379{
3380 const unsigned int width = 2;
3381 const unsigned int height = 2;
3382 const unsigned int channelCount = 2;
3383 const unsigned int batchSize = 2;
3384
3385 unsigned int shape[] = { batchSize, channelCount, height, width };
3386
3387 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3388 3, 3, 3, 3, 4, 4, 4, 4 });
3389
3390 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3391 4, 4, 4, 4, 5, 5, 5, 5 });
3392
3393 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3394 4, 4, 4, 4, 5, 5, 5, 5 });
3395
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003396 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3397 workloadFactory,
3398 memoryManager,
3399 shape,
3400 input0,
3401 shape,
3402 input1,
3403 shape,
3404 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003405}
3406
3407LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3408 armnn::IWorkloadFactory& workloadFactory,
3409 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3410{
3411 unsigned int shape0[] = { 1, 2, 2, 2 };
3412 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3413
3414 unsigned int shape1[] = { 1, 1, 1, 1 };
3415 std::vector<float> input1({ 2 });
3416
3417 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3418
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003419 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3420 workloadFactory,
3421 memoryManager,
3422 shape0,
3423 input0,
3424 shape1,
3425 input1,
3426 shape0,
3427 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003428}
3429
3430LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3431 armnn::IWorkloadFactory& workloadFactory,
3432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3433{
3434 const unsigned int shape0[] = { 1, 2, 2, 3 };
3435 const unsigned int shape1[] = { 1, 1, 1, 3 };
3436
3437 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3438 7, 8, 9, 10, 11, 12 });
3439
3440 std::vector<float> input1({ 1, 2, 3});
3441
3442 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003443 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003444
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003445 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3446 workloadFactory,
3447 memoryManager,
3448 shape0,
3449 input0,
3450 shape1,
3451 input1,
3452 shape0,
3453 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003454}
3455
3456LayerTestResult<uint8_t, 4> MaximumUint8Test(
3457 armnn::IWorkloadFactory& workloadFactory,
3458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3459{
3460 unsigned int shape[] = { 2, 2, 2, 2 };
3461
3462 // See dequantized values to the right.
3463 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3464 3, 3, 3, 3, 4, 4, 4, 4 });
3465
3466 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3467 4, 4, 4, 4, 5, 5, 5, 5 });
3468
3469 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3470 4, 4, 4, 4, 5, 5, 5, 5 });
3471
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003472 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3473 workloadFactory,
3474 memoryManager,
3475 shape,
3476 input0,
3477 shape,
3478 input1,
3479 shape,
3480 output,
3481 1.0f,
3482 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003483}
3484
3485LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3486 armnn::IWorkloadFactory& workloadFactory,
3487 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3488{
3489 const unsigned int shape0[] = { 1, 2, 2, 3 };
3490 const unsigned int shape1[] = { 1, 1, 1, 1 };
3491
3492 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3493 7, 8, 9, 10, 11, 12 });
3494
3495 std::vector<uint8_t> input1({2});
3496
3497 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3498 7, 8, 9, 10, 11, 12 });
3499
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003500 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3501 workloadFactory,
3502 memoryManager,
3503 shape0,
3504 input0,
3505 shape1,
3506 input1,
3507 shape0,
3508 output,
3509 1.0f,
3510 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003511}
3512
3513LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3514 armnn::IWorkloadFactory& workloadFactory,
3515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3516{
3517 const unsigned int shape0[] = { 1, 2, 2, 3 };
3518 const unsigned int shape1[] = { 1, 1, 1, 3 };
3519
3520 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3521 7, 8, 9, 10, 11, 12 });
3522
3523 std::vector<uint8_t> input1({ 1, 10, 3});
3524
3525 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3526 7, 10, 9, 10, 11, 12 });
3527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003528 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3529 workloadFactory,
3530 memoryManager,
3531 shape0,
3532 input0,
3533 shape1,
3534 input1,
3535 shape0,
3536 output,
3537 1.0f,
3538 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003539}
3540
Sadik Armagan2999a022019-04-09 14:20:12 +01003541LayerTestResult<int16_t, 4> MaximumInt16Test(
3542 armnn::IWorkloadFactory& workloadFactory,
3543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3544{
3545 unsigned int shape[] = { 2, 2, 2, 2 };
3546
3547 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3548 3, 3, 3, 3, 4, 4, 4, 4 });
3549
3550 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3551 4, 4, 4, 4, 5, 5, 5, 5 });
3552
3553 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3554 4, 4, 4, 4, 5, 5, 5, 5 });
3555
3556 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3557 workloadFactory,
3558 memoryManager,
3559 shape,
3560 input0,
3561 shape,
3562 input1,
3563 shape,
3564 output,
3565 1.0f,
3566 0);
3567}
3568
3569LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3570 armnn::IWorkloadFactory& workloadFactory,
3571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3572{
3573 const unsigned int shape0[] = { 1, 2, 2, 3 };
3574 const unsigned int shape1[] = { 1, 1, 1, 1 };
3575
3576 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3577 7, 8, 9, 10, 11, 12 });
3578
3579 std::vector<int16_t> input1({2});
3580
3581 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3582 7, 8, 9, 10, 11, 12 });
3583
3584 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3585 workloadFactory,
3586 memoryManager,
3587 shape0,
3588 input0,
3589 shape1,
3590 input1,
3591 shape0,
3592 output,
3593 1.0f,
3594 0);
3595}
3596
3597LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3598 armnn::IWorkloadFactory& workloadFactory,
3599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3600{
3601 const unsigned int shape0[] = { 1, 2, 2, 3 };
3602 const unsigned int shape1[] = { 1, 1, 1, 3 };
3603
3604 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3605 7, 8, 9, 10, 11, 12 });
3606
3607 std::vector<int16_t> input1({ 1, 10, 3});
3608
3609 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3610 7, 10, 9, 10, 11, 12 });
3611
3612 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3613 workloadFactory,
3614 memoryManager,
3615 shape0,
3616 input0,
3617 shape1,
3618 input1,
3619 shape0,
3620 output,
3621 1.0f,
3622 0);
3623}
3624
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003625LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3626 armnn::IWorkloadFactory& workloadFactory,
3627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3628{
3629 unsigned int shape0[] = { 1, 2, 2, 2 };
3630 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3631
3632 unsigned int shape1[] = { 1, 1, 1, 1 };
3633 std::vector<float> input1({ 2 });
3634
3635 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3636
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003637 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3638 workloadFactory,
3639 memoryManager,
3640 shape0,
3641 input0,
3642 shape1,
3643 input1,
3644 shape0,
3645 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003646}
3647
3648
3649LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3650 armnn::IWorkloadFactory& workloadFactory,
3651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3652{
3653 unsigned int shape0[] = { 1, 2, 2, 2 };
3654 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3655
3656 unsigned int shape1[] = { 1, 1, 1, 1 };
3657 std::vector<float> input1({ 5 });
3658
3659 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003661 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3662 workloadFactory,
3663 memoryManager,
3664 shape0,
3665 input0,
3666 shape1,
3667 input1,
3668 shape0,
3669 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003670}
3671
3672LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3673 armnn::IWorkloadFactory & workloadFactory,
3674 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3675{
3676 const unsigned int shape0[] = { 1, 2, 2, 3 };
3677 const unsigned int shape1[] = { 1, 1, 1, 3 };
3678
3679 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3680 7, 1, 2, 3, 4, 5 });
3681
3682 std::vector<uint8_t> input1({ 1, 2, 3});
3683
3684 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3685 1, 1, 2, 1, 2, 3 });
3686
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003687 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3688 workloadFactory,
3689 memoryManager,
3690 shape0,
3691 input0,
3692 shape1,
3693 input1,
3694 shape0,
3695 output,
3696 1.0f,
3697 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003698}
3699
Sadik Armagan2999a022019-04-09 14:20:12 +01003700LayerTestResult<int16_t, 4> MinimumInt16Test(
3701 armnn::IWorkloadFactory& workloadFactory,
3702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3703{
3704 unsigned int shape[] = { 2, 2, 2, 2 };
3705
3706 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3707 3, 3, 3, 3, 4, 4, 4, 4 });
3708
3709 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3710 4, 4, 4, 4, 5, 5, 5, 5 });
3711
3712 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3713 3, 3, 3, 3, 4, 4, 4, 4 });
3714
3715 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3716 workloadFactory,
3717 memoryManager,
3718 shape,
3719 input0,
3720 shape,
3721 input1,
3722 shape,
3723 output,
3724 1.0f,
3725 0);
3726}
3727
3728LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3729 armnn::IWorkloadFactory& workloadFactory,
3730 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3731{
3732 const unsigned int shape0[] = { 1, 2, 2, 3 };
3733 const unsigned int shape1[] = { 1, 1, 1, 1 };
3734
3735 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3736 7, 8, 9, 10, 11, 12 });
3737
3738 std::vector<int16_t> input1({2});
3739
3740 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3741 2, 2, 2, 2, 2, 2 });
3742
3743 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3744 workloadFactory,
3745 memoryManager,
3746 shape0,
3747 input0,
3748 shape1,
3749 input1,
3750 shape0,
3751 output,
3752 1.0f,
3753 0);
3754}
3755
3756LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3757 armnn::IWorkloadFactory& workloadFactory,
3758 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3759{
3760 const unsigned int shape0[] = { 1, 2, 2, 3 };
3761 const unsigned int shape1[] = { 1, 1, 1, 3 };
3762
3763 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3764 7, 8, 9, 10, 11, 12 });
3765
3766 std::vector<int16_t> input1({ 1, 10, 3});
3767
3768 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3769 1, 8, 3, 1, 10, 3 });
3770
3771 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3772 workloadFactory,
3773 memoryManager,
3774 shape0,
3775 input0,
3776 shape1,
3777 input1,
3778 shape0,
3779 output,
3780 1.0f,
3781 0);
3782}
3783
namespace {
// Runs a Multiplication workload on the given backend and returns both the
// actual output and the caller-supplied expected output in a LayerTestResult.
//
// shape0/values0 and shape1/values1 describe the two 4D Float32 inputs
// (shape1 may be broadcastable against shape0); outShape/outValues describe
// the expected element-wise product.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test helpers but is not used in this function.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Backing memory must exist before any data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after allocation and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
3833
3834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003835LayerTestResult<float,4> MultiplicationTest(
3836 armnn::IWorkloadFactory& workloadFactory,
3837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003838{
3839 const unsigned int width = 2;
3840 const unsigned int height = 2;
3841 const unsigned int channelCount = 2;
3842 const unsigned int batchSize = 2;
3843
3844 unsigned int shape[] = { batchSize, channelCount, height, width };
3845
3846 std::vector<float> input0({
3847 1, 1, 1, 1, 2, 2, 2, 2,
3848 3, 3, 3, 3, 4, 4, 4, 4 });
3849
3850 std::vector<float> input1({
3851 2, 2, 2, 2, 3, 3, 3, 3,
3852 4, 4, 4, 4, 5, 5, 5, 5 });
3853
3854 std::vector<float> output({
3855 2, 2, 2, 2, 6, 6, 6, 6,
3856 12, 12, 12, 12, 20, 20, 20, 20 });
3857
3858 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003859 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003860 shape,
3861 input0,
3862 shape,
3863 input1,
3864 shape,
3865 output);
3866}
3867
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003868LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3869 armnn::IWorkloadFactory& workloadFactory,
3870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003871{
3872 unsigned int shape0[] = { 1, 2, 2, 2 };
3873 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3874
3875 unsigned int shape1[] = { 1, 1, 1, 1 };
3876 std::vector<float> input1({ 2 });
3877
3878 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3879
3880 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003881 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003882 shape0,
3883 input0,
3884 shape1,
3885 input1,
3886 shape0,
3887 output);
3888}
3889
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003890LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3891 armnn::IWorkloadFactory& workloadFactory,
3892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003893{
3894 unsigned int shape0[] = { 1, 3, 3, 2 };
3895 std::vector<float> input0({
3896 1, 2, 3, 4, 5, 6,
3897 7, 8, 9, 10, 11, 12,
3898 13, 14, 15, 16, 17, 18});
3899
3900 unsigned int shape1[] = { 1, 1, 1, 2 };
3901 std::vector<float> input1({ 1, 2 });
3902
3903 std::vector<float> output({
3904 1, 4, 3, 8, 5, 12,
3905 7, 16, 9, 20, 11, 24,
3906 13, 28, 15, 32, 17, 36});
3907
3908 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003909 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003910 shape0,
3911 input0,
3912 shape1,
3913 input1,
3914 shape0,
3915 output);
3916}
telsoa014fcda012018-03-09 14:13:49 +00003917
// Runs the same Multiplication workload, fed with identical pseudo-random
// inputs, on both the backend under test (workloadFactory) and a reference
// backend (refWorkloadFactory). The result holds the backend output in
// 'output' and the reference output in 'outputExpected' for the caller to
// compare.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test functions but is not used in this function.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the "random" inputs deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload shares the descriptor but is rebound to the
    // reference backend's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both pipelines receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // 'output' <- backend under test; 'outputExpected' <- reference backend.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3987
// Runs the same BatchNormalization workload, fed with identical pseudo-random
// input and parameters, on both the backend under test (workloadFactory) and
// a reference backend (refWorkloadFactory). The result holds the backend
// output in 'output' and the reference output in 'outputExpected' for the
// caller to compare.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test functions but is not used in this function.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    // Data tensors are NCHW; the per-channel parameter tensors are 1D of
    // length 'channels'.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the "random" data deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    // NOTE(review): the extra 0.0f argument presumably bounds the variance
    // values below (variance must be non-negative) — confirm against
    // MakeRandomTensor's signature.
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload shares the descriptor (including the parameter
    // tensors) but is rebound to the reference backend's data handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both pipelines receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // 'output' <- backend under test; 'outputExpected' <- reference backend.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
4070
// Runs a Permute workload that rearranges inputData according to 'mappings',
// writing the permuted values into outputData (resized as needed) and
// updating inputTensorInfo in place so it describes the permuted layout.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other helpers but is not used in this function.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted shape back to the caller via the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
4114
Jim Flynn825af452019-05-20 12:49:28 +01004115armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004116 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4117 unsigned int concatDim)
4118{
telsoa014fcda012018-03-09 14:13:49 +00004119 std::vector<armnn::TensorShape> shapes;
4120 shapes.reserve(inputTensorInfos.size());
4121 for (const armnn::TensorInfo& it: inputTensorInfos)
4122 {
4123 shapes.push_back(it.GetShape());
4124 }
surmeh013537c2c2018-05-18 16:31:43 +01004125
Jim Flynn825af452019-05-20 12:49:28 +01004126 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4127 shapes.end(),
4128 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004129}
4130
//
// Concatenation is only supported for the N and C dimensions of NCHW tensors and for
// the innermost dimension. For tensors with fewer than 4 dimensions we therefore need
// to make sure the concatenation dimension is either at least the 3rd slowest
// iterating one or the innermost dimension.
//
4136
// Returns true when the inputs must be permuted before concatenation along
// concatDim can be performed (see the restriction note above this function).
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Tensors with fewer than 3 dimensions always need the permute path.
    // For exactly 3 dimensions, (nDimensions - concatDim) evaluates to 3 for
    // axis 0 (outermost) and 1 for axis 2 (innermost) — both supported — so a
    // permute is only needed for the middle axis (concatDim == 1).
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
4162
4163armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4164{
4165 unsigned int numDims = inputShape.GetNumDimensions();
4166 if (numDims >= 3)
4167 {
4168 // Nothing to do if the inputShape has at least 3 dimensions.
4169 return inputShape;
4170 }
4171
4172 std::vector<unsigned int> newDims(size_t(3), 1u);
4173 unsigned int expandedBy = 3 - numDims;
4174 for (unsigned int i=0; i<numDims; ++i)
4175 {
4176 newDims[expandedBy+i] = inputShape[i];
4177 }
4178 return armnn::TensorShape(3u, &newDims[0]);
4179}
4180
4181void Generate3dPermuteVectorForConcat(
4182 unsigned int numDimensions,
4183 unsigned int & concatDim,
4184 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4185{
4186 BOOST_ASSERT_MSG(numDimensions <= 3,
4187 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004188 unsigned int expandedBy = 3 - numDimensions;
4189 unsigned int expandedConcatAxis = concatDim + expandedBy;
4190
4191 if (expandedConcatAxis == 2)
4192 {
4193 concatDim = 0;
4194 armnn::PermutationVector forwardPermutation({1, 2, 0});
4195 armnn::PermutationVector reversePermutation({2, 0, 1});
4196 permutations = std::make_pair(forwardPermutation, reversePermutation);
4197 }
4198 else if (expandedConcatAxis == 1)
4199 {
4200 concatDim = 0;
4201 armnn::PermutationVector forwardPermutation({2, 0, 1});
4202 armnn::PermutationVector reversePermutation({1, 2, 0});
4203 permutations = std::make_pair(forwardPermutation, reversePermutation);
4204 }
4205 else
4206 {
4207 BOOST_ASSERT(expandedConcatAxis == 0);
4208 concatDim = 0;
4209 }
4210}
4211
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// On return: inputTensorInfos/inputData describe the permuted inputs (with
// inputDataStorage owning the permuted copies), permuteVector holds the
// reverse permutation for restoring the final output, concatDim is rewritten
// to the post-permute concatenation axis, and outputTensorInfo's shape is
// updated to the permuted output layout.
//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // The permutation pair is derived once, from the first input.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Pad the shape to 3d, then run the forward permutation on the data.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Repoint the caller's data/info at the permuted copies.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4280
4281
//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// 'permuteVector' is the reverse permutation produced by
// PermuteInputsForConcat; the restored values are written to 'data'.
//
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the concatenated result out of the device handle first.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // Apply the reverse permutation so the data is back in the caller's layout.
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
4320
// Concatenates the given inputs along concatDim and writes the result to
// 'output'. If the requested axis is not directly supported (see
// NeedPermuteForConcat) the inputs are permuted first and the result is
// permuted back afterwards. When useSubtensor is true and the backend
// supports it, the inputs are created as sub-tensors of the output handle so
// the backend concatenates in place; otherwise separate input handles are
// used.
template <typename T>
void Concatenate(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const armnn::TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Saves a copy of the parameters which we might need to change.
    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;

    armnn::PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        // This rewrites inputTensorInfos/inputs/concatDim/outputTensorInfo in
        // place and fills permuteVector with the reverse permutation.
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    armnn::WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ConcatQueueDescriptor queueDescriptor;
    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        // Each view origin records where an input starts inside the output.
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }

        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        // Fall back to plain tensor handles if the backend has no sub-tensor
        // support.
        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
            std::unique_ptr<armnn::ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);

            inputHandles.emplace_back(std::move(inputHandle));
        }

    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    // Allocation must precede the data copies below.
    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        // Undo the forward permutation so 'output' is in the caller's layout.
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}
4457
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004458template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004459LayerTestResult<T, 1> Concatenation1dTestImpl(
4460 armnn::IWorkloadFactory& workloadFactory,
4461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4462 float qScale,
4463 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004464{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004465 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004466
4467 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4468 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4469 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4470
Jim Flynncbb66aa2019-05-15 13:03:54 +01004471 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004472
4473 LayerTestResult<T, 1> result(outputTensorInfo);
4474
4475 std::vector<T> output;
4476 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004477 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004478 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4479 { input0.data(), input1.data(), input2.data() },
4480 outputTensorInfo,
4481 output.data(),
4482 0,
4483 true);
telsoa014fcda012018-03-09 14:13:49 +00004484
4485 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4486 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4487 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4488 }));
4489
4490 return result;
4491}
4492
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004493LayerTestResult<float, 1> Concatenation1dTest(
4494 armnn::IWorkloadFactory& workloadFactory,
4495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004496{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004497 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004498}
4499
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004500template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004501LayerTestResult<T, 2> Concatenation2dTestImpl(
4502 armnn::IWorkloadFactory& workloadFactory,
4503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004504 const armnn::TensorInfo& outputTensorInfo,
4505 unsigned int dimension,
4506 const float qScale,
4507 const int32_t qOffset)
4508{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004509 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004510
4511 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4512 // Batch 0
4513 1.0f, 2.0f, 3.0f,
4514
4515 // Batch 1
4516 10.0f, 11.0f, 12.0f,
4517 }));
4518
4519 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4520 // Batch 0
4521 4.0f, 5.0f, 6.0f,
4522
4523 // Batch 1
4524 13.0f, 14.0f, 15.0f,
4525 }));
4526
4527 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4528 // Batch 0
4529 7.0f, 8.0f, 9.0f,
4530
4531 // Batch 1
4532 16.0f, 17.0f, 18.0f,
4533 }));
4534
4535 LayerTestResult<T, 2> result(outputTensorInfo);
4536
4537 std::vector<T> output;
4538 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004539 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004540 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4541 { input0.data(), input1.data(), input2.data() },
4542 outputTensorInfo,
4543 output.data(),
4544 dimension,
4545 true);
telsoa014fcda012018-03-09 14:13:49 +00004546
4547 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4548 return result;
4549}
4550
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004551template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004552LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4553 armnn::IWorkloadFactory& workloadFactory,
4554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4555 float qScale,
4556 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004557{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004558 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004559
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004560 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4561 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4562
telsoa014fcda012018-03-09 14:13:49 +00004563 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4564 // Batch 0
4565 1.0f, 2.0f, 3.0f,
4566
4567 // Batch 1
4568 10.0f, 11.0f, 12.0f,
4569
4570 // Batch 2
4571 4.0f, 5.0f, 6.0f,
4572
4573 // Batch 3
4574 13.0f, 14.0f, 15.0f,
4575
4576 // Batch 4
4577 7.0f, 8.0f, 9.0f,
4578
4579 // Batch 5
4580 16.0f, 17.0f, 18.0f,
4581 }));
4582
4583 return result;
4584}
4585
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004586LayerTestResult<float, 2> Concatenation2dDim0Test(
4587 armnn::IWorkloadFactory& workloadFactory,
4588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004589{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004590 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004591}
4592
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004593template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004594LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4595 armnn::IWorkloadFactory& workloadFactory,
4596 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4597 float qScale,
4598 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004599{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004600 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004601
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004602 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4603 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4604
telsoa014fcda012018-03-09 14:13:49 +00004605 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4606 // Batch 0
4607 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4608
4609 // Batch 1
4610 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4611 }));
4612
4613 return result;
4614}
4615
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004616LayerTestResult<float, 2> Concatenation2dDim1Test(
4617 armnn::IWorkloadFactory& workloadFactory,
4618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004619{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004620 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004621}
4622
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004623template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004624LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
4625 armnn::IWorkloadFactory& workloadFactory,
4626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4627 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004628 int32_t qOffset)
4629{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004630 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004631 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4632 // Batch 0
4633 1.0f, 2.0f, 3.0f,
4634
4635 // Batch 1
4636 10.0f, 11.0f, 12.0f,
4637 }));
4638
Jim Flynncbb66aa2019-05-15 13:03:54 +01004639 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004640 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4641 // Batch 0
4642 4.0f, 5.0f, 6.0f,
4643
4644 // Batch 1
4645 13.0f, 14.0f, 15.0f,
4646
4647 // Batch 0
4648 7.0f, 8.0f, 9.0f,
4649 }));
4650
Jim Flynncbb66aa2019-05-15 13:03:54 +01004651 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004652 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4653 // Batch 1
4654 16.0f, 17.0f, 18.0f,
4655 }));
4656
Jim Flynncbb66aa2019-05-15 13:03:54 +01004657 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004658 LayerTestResult<T, 2> result(outputTensorInfo);
4659
4660 std::vector<T> output;
4661 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004662 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004663 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4664 { input0.data(), input1.data(), input2.data() },
4665 outputTensorInfo,
4666 output.data(),
4667 0,
4668 true);
telsoa014fcda012018-03-09 14:13:49 +00004669
4670 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4671 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4672 // Batch 0
4673 1.0f, 2.0f, 3.0f,
4674
4675 // Batch 1
4676 10.0f, 11.0f, 12.0f,
4677
4678 // Batch 2
4679 4.0f, 5.0f, 6.0f,
4680
4681 // Batch 3
4682 13.0f, 14.0f, 15.0f,
4683
4684 // Batch 4
4685 7.0f, 8.0f, 9.0f,
4686
4687 // Batch 5
4688 16.0f, 17.0f, 18.0f,
4689 }));
4690
4691 return result;
4692}
4693
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004694LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
4695 armnn::IWorkloadFactory& workloadFactory,
4696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004697{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004698 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4699 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004700}
4701
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004702template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004703LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
4704 armnn::IWorkloadFactory& workloadFactory,
4705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4706 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004707 int32_t qOffset)
4708{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004709 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004710 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4711 // Batch 0
4712 1.0f, 2.0f, 3.0f,
4713
4714 // Batch 1
4715 10.0f, 11.0f, 12.0f,
4716 }));
4717
Jim Flynncbb66aa2019-05-15 13:03:54 +01004718 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004719 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4720 // Batch 0
4721 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
4722
4723 // Batch 1
4724 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
4725 }));
4726
Jim Flynncbb66aa2019-05-15 13:03:54 +01004727 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004728 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4729 // Batch 0
4730 9.0f,
4731
4732 // Batch 1
4733 18.0f
4734 }));
4735
Jim Flynncbb66aa2019-05-15 13:03:54 +01004736 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004737 LayerTestResult<T, 2> result(outputTensorInfo);
4738
4739 std::vector<T> output;
4740 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004741 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004742 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4743 { input0.data(), input1.data(), input2.data() },
4744 outputTensorInfo,
4745 output.data(),
4746 1,
4747 true);
telsoa014fcda012018-03-09 14:13:49 +00004748
4749 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4750 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4751 // Batch 0
4752 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4753
4754 // Batch 1
4755 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
4756 }));
4757
4758 return result;
4759}
4760
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004761LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4762 armnn::IWorkloadFactory& workloadFactory,
4763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004764{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004765 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4766 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004767}
4768
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004769template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004770LayerTestResult<T, 3> Concatenation3dTestImpl(
4771 armnn::IWorkloadFactory& workloadFactory,
4772 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004773 const armnn::TensorInfo& outputTensorInfo,
4774 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00004775 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00004776 float qScale,
4777 int32_t qOffset)
4778{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004779 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004780
4781 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4782 // Batch 0, Channel 0
4783 1.0f, 2.0f,
4784
4785 // Batch 0, Channel 1
4786 3.0f, 4.0f,
4787
4788 // Batch 0, Channel 2
4789 5.0f, 6.0f,
4790
4791 // Batch 1, Channel 0
4792 19.0f, 20.0f,
4793
4794 // Batch 1, Channel 1
4795 21.0f, 22.0f,
4796
4797 // Batch 1, Channel 2
4798 23.0f, 24.0f
4799 }));
4800
4801 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4802 // Batch 0, Channel 0
4803 7.0f, 8.0f,
4804
4805 // Batch 0, Channel 1
4806 9.0f, 10.0f,
4807
4808 // Batch 0, Channel 2
4809 11.0f, 12.0f,
4810
4811 // Batch 1, Channel 0
4812 25.0f, 26.0f,
4813
4814 // Batch 1, Channel 1
4815 27.0f, 28.0f,
4816
4817 // Batch 1, Channel 2
4818 29.0f, 30.0f
4819 }));
4820
4821 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4822 // Batch 0, Channel 0
4823 13.0f, 14.0f,
4824
4825 // Batch 0, Channel 1
4826 15.0f, 16.0f,
4827
4828 // Batch 0, Channel 2
4829 17.0f, 18.0f,
4830
4831 // Batch 1, Channel 0
4832 31.0f, 32.0f,
4833
4834 // Batch 1, Channel 1
4835 33.0f, 34.0f,
4836
4837 // Batch 1, Channel 2
4838 35.0f, 36.0f
4839 }));
4840
4841 LayerTestResult<T, 3> result(outputTensorInfo);
4842
4843 std::vector<T> output;
4844 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004845 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004846 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4847 { input0.data(), input1.data(), input2.data() },
4848 outputTensorInfo,
4849 output.data(),
4850 dimension,
4851 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004852
4853 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4854 return result;
4855}
4856
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004857template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004858LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
4859 armnn::IWorkloadFactory& workloadFactory,
4860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4861 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004862 int32_t qOffset)
4863{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004864 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004865
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004866 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4867 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4868
telsoa014fcda012018-03-09 14:13:49 +00004869 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4870 // Batch 0, Channel 0
4871 1.0f, 2.0f,
4872
4873 // Batch 0, Channel 1
4874 3.0f, 4.0f,
4875
4876 // Batch 0, Channel 2
4877 5.0f, 6.0f,
4878
4879 // Batch 1, Channel 0
4880 19.0f, 20.0f,
4881
4882 // Batch 1, Channel 1
4883 21.0f, 22.0f,
4884
4885 // Batch 1, Channel 2
4886 23.0f, 24.0f,
4887
4888 // Batch 2, Channel 0
4889 7.0f, 8.0f,
4890
4891 // Batch 2, Channel 1
4892 9.0f, 10.0f,
4893
4894 // Batch 2, Channel 2
4895 11.0f, 12.0f,
4896
4897 // Batch 3, Channel 0
4898 25.0f, 26.0f,
4899
4900 // Batch 3, Channel 1
4901 27.0f, 28.0f,
4902
4903 // Batch 3, Channel 2
4904 29.0f, 30.0f,
4905
4906 // Batch 4, Channel 0
4907 13.0f, 14.0f,
4908
4909 // Batch 4, Channel 1
4910 15.0f, 16.0f,
4911
4912 // Batch 4, Channel 2
4913 17.0f, 18.0f,
4914
4915 // Batch 5, Channel 0
4916 31.0f, 32.0f,
4917
4918 // Batch 5, Channel 1
4919 33.0f, 34.0f,
4920
4921 // Batch 5, Channel 2
4922 35.0f, 36.0f
4923 }));
narpra015cdda352018-11-19 15:30:27 +00004924
telsoa014fcda012018-03-09 14:13:49 +00004925 return result;
4926}
4927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004928LayerTestResult<float, 3> Concatenation3dDim0Test(
4929 armnn::IWorkloadFactory& workloadFactory,
4930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004931{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004932 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004933}
4934
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004935template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004936LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
4937 armnn::IWorkloadFactory& workloadFactory,
4938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4939 float qScale,
4940 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004941{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004942 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004943
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004944 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4945 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004946
telsoa014fcda012018-03-09 14:13:49 +00004947 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4948 // Batch 0, Channel 0
4949 1.0f, 2.0f,
4950
4951 // Batch 0, Channel 1
4952 3.0f, 4.0f,
4953
4954 // Batch 0, Channel 2
4955 5.0f, 6.0f,
4956
4957 // Batch 0, Channel 3
4958 7.0f, 8.0f,
4959
4960 // Batch 0, Channel 4
4961 9.0f, 10.0f,
4962
4963 // Batch 0, Channel 5
4964 11.0f, 12.0f,
4965
4966 // Batch 0, Channel 6
4967 13.0f, 14.0f,
4968
4969 // Batch 0, Channel 7
4970 15.0f, 16.0f,
4971
4972 // Batch 0, Channel 8
4973 17.0f, 18.0f,
4974
4975 // Batch 1, Channel 0
4976 19.0f, 20.0f,
4977
4978 // Batch 1, Channel 1
4979 21.0f, 22.0f,
4980
4981 // Batch 1, Channel 2
4982 23.0f, 24.0f,
4983
4984 // Batch 1, Channel 3
4985 25.0f, 26.0f,
4986
4987 // Batch 1, Channel 4
4988 27.0f, 28.0f,
4989
4990 // Batch 1, Channel 5
4991 29.0f, 30.0f,
4992
4993 // Batch 1, Channel 6
4994 31.0f, 32.0f,
4995
4996 // Batch 1, Channel 7
4997 33.0f, 34.0f,
4998
4999 // Batch 1, Channel 8
5000 35.0f, 36.0f
5001 }));
5002
5003 return result;
5004}
5005
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005006LayerTestResult<float, 3> Concatenation3dDim1Test(
5007 armnn::IWorkloadFactory& workloadFactory,
5008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005009{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005010 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005011}
5012
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005013template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005014LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
5015 armnn::IWorkloadFactory& workloadFactory,
5016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005017 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005018 float qScale,
5019 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00005020{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005021 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005022
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005023 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
5024 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005025
telsoa014fcda012018-03-09 14:13:49 +00005026 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5027 // Batch 0, Channel 0
5028 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
5029
5030 // Batch 0, Channel 1
5031 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5032
5033 // Batch 0, Channel 2
5034 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5035
5036 // Batch 1, Channel 0
5037 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5038
5039 // Batch 1, Channel 1
5040 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5041
5042 // Batch 1, Channel 2
5043 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5044 }));
5045
5046 return result;
5047}
5048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005049LayerTestResult<float, 3> Concatenation3dDim2Test(
5050 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5052 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005053{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005054 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5055 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005056}
5057
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005058template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005059LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5060 armnn::IWorkloadFactory& workloadFactory,
5061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5062 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005063 int32_t qOffset)
5064{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005065 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005066 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5067 // Batch 0, Channel 0
5068 1.0f, 2.0f,
5069
5070 // Batch 0, Channel 1
5071 3.0f, 4.0f,
5072
5073 // Batch 0, Channel 2
5074 5.0f, 6.0f,
5075
5076 // Batch 1, Channel 0
5077 19.0f, 20.0f,
5078
5079 // Batch 1, Channel 1
5080 21.0f, 22.0f,
5081
5082 // Batch 1, Channel 2
5083 23.0f, 24.0f
5084 }));
5085
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005086 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005087 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5088 // Batch 0, Channel 0
5089 7.0f, 8.0f,
5090
5091 // Batch 0, Channel 1
5092 9.0f, 10.0f,
5093
5094 // Batch 0, Channel 2
5095 11.0f, 12.0f,
5096 }));
5097
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005098 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005099 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5100 // Batch 0, Channel 0
5101 25.0f, 26.0f,
5102
5103 // Batch 0, Channel 1
5104 27.0f, 28.0f,
5105
5106 // Batch 0, Channel 2
5107 29.0f, 30.0f,
5108
5109 // Batch 1, Channel 0
5110 13.0f, 14.0f,
5111
5112 // Batch 1, Channel 1
5113 15.0f, 16.0f,
5114
5115 // Batch 1, Channel 2
5116 17.0f, 18.0f,
5117
5118 // Batch 2, Channel 0
5119 31.0f, 32.0f,
5120
5121 // Batch 2, Channel 1
5122 33.0f, 34.0f,
5123
5124 // Batch 2, Channel 2
5125 35.0f, 36.0f
5126 }));
5127
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005128 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005129 LayerTestResult<T, 3> result(outputTensorInfo);
5130
5131 std::vector<T> output;
5132 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005133 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005134 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5135 { input0.data(), input1.data(), input2.data() },
5136 outputTensorInfo,
5137 output.data(),
5138 0,
5139 true);
telsoa014fcda012018-03-09 14:13:49 +00005140
5141 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5142 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5143 // Batch 0, Channel 0
5144 1.0f, 2.0f,
5145
5146 // Batch 0, Channel 1
5147 3.0f, 4.0f,
5148
5149 // Batch 0, Channel 2
5150 5.0f, 6.0f,
5151
5152 // Batch 1, Channel 0
5153 19.0f, 20.0f,
5154
5155 // Batch 1, Channel 1
5156 21.0f, 22.0f,
5157
5158 // Batch 1, Channel 2
5159 23.0f, 24.0f,
5160
5161 // Batch 2, Channel 0
5162 7.0f, 8.0f,
5163
5164 // Batch 2, Channel 1
5165 9.0f, 10.0f,
5166
5167 // Batch 2, Channel 2
5168 11.0f, 12.0f,
5169
5170 // Batch 3, Channel 0
5171 25.0f, 26.0f,
5172
5173 // Batch 3, Channel 1
5174 27.0f, 28.0f,
5175
5176 // Batch 3, Channel 2
5177 29.0f, 30.0f,
5178
5179 // Batch 4, Channel 0
5180 13.0f, 14.0f,
5181
5182 // Batch 4, Channel 1
5183 15.0f, 16.0f,
5184
5185 // Batch 4, Channel 2
5186 17.0f, 18.0f,
5187
5188 // Batch 5, Channel 0
5189 31.0f, 32.0f,
5190
5191 // Batch 5, Channel 1
5192 33.0f, 34.0f,
5193
5194 // Batch 5, Channel 2
5195 35.0f, 36.0f
5196 }));
5197
5198 return result;
5199}
5200
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005201LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5202 armnn::IWorkloadFactory& workloadFactory,
5203 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005204{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005205 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5206 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005207}
5208
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005209template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005210LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5211 armnn::IWorkloadFactory& workloadFactory,
5212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5213 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005214 int32_t qOffset)
5215{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005216 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005217 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5218 // Batch 0, Channel 0
5219 1.0f, 2.0f,
5220
5221 // Batch 0, Channel 1
5222 3.0f, 4.0f,
5223
5224 // Batch 0, Channel 2
5225 5.0f, 6.0f,
5226
5227 // Batch 1, Channel 0
5228 19.0f, 20.0f,
5229
5230 // Batch 1, Channel 1
5231 21.0f, 22.0f,
5232
5233 // Batch 1, Channel 2
5234 23.0f, 24.0f
5235 }));
5236
Jim Flynncbb66aa2019-05-15 13:03:54 +01005237 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005238 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5239 // Batch 0, Channel 0
5240 7.0f, 8.0f,
5241
5242 // Batch 0, Channel 1
5243 9.0f, 10.0f,
5244
5245 // Batch 0, Channel 2
5246 11.0f, 12.0f,
5247
5248 // Batch 0, Channel 3
5249 25.0f, 26.0f,
5250
5251 // Batch 1, Channel 0
5252 27.0f, 28.0f,
5253
5254 // Batch 1, Channel 1
5255 29.0f, 30.0f,
5256
5257 // Batch 1, Channel 2
5258 13.0f, 14.0f,
5259
5260 // Batch 1, Channel 3
5261 15.0f, 16.0f,
5262 }));
5263
Jim Flynncbb66aa2019-05-15 13:03:54 +01005264 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005265 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5266 // Batch 0, Channel 0
5267 17.0f, 18.0f,
5268
5269 // Batch 1, Channel 0
5270 31.0f, 32.0f,
5271 }));
5272
Jim Flynncbb66aa2019-05-15 13:03:54 +01005273 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005274 LayerTestResult<T, 3> result(outputTensorInfo);
5275
5276 std::vector<T> output;
5277 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005278 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005279 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5280 { input0.data(), input1.data(), input2.data() },
5281 outputTensorInfo,
5282 output.data(),
5283 1,
5284 true);
telsoa014fcda012018-03-09 14:13:49 +00005285
5286 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5287 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5288 // Batch 0, Channel 0
5289 1.0f, 2.0f,
5290
5291 // Batch 0, Channel 1
5292 3.0f, 4.0f,
5293
5294 // Batch 0, Channel 2
5295 5.0f, 6.0f,
5296
5297 // Batch 0, Channel 3
5298 7.0f, 8.0f,
5299
5300 // Batch 0, Channel 4
5301 9.0f, 10.0f,
5302
5303 // Batch 0, Channel 5
5304 11.0f, 12.0f,
5305
5306 // Batch 0, Channel 6
5307 25.0f, 26.0f,
5308
5309 // Batch 0, Channel 7
5310 17.0f, 18.0f,
5311
5312 // Batch 1, Channel 0
5313 19.0f, 20.0f,
5314
5315 // Batch 1, Channel 1
5316 21.0f, 22.0f,
5317
5318 // Batch 1, Channel 2
5319 23.0f, 24.0f,
5320
5321 // Batch 1, Channel 3
5322 27.0f, 28.0f,
5323
5324 // Batch 1, Channel 4
5325 29.0f, 30.0f,
5326
5327 // Batch 1, Channel 5
5328 13.0f, 14.0f,
5329
5330 // Batch 1, Channel 6
5331 15.0f, 16.0f,
5332
5333 // Batch 1, Channel 7
5334 31.0f, 32.0f,
5335 }));
5336
5337 return result;
5338}
5339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005340LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5341 armnn::IWorkloadFactory& workloadFactory,
5342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005343{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005344 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5345 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005346}
5347
// Concatenates three 3D tensors of shapes [2, 3, 2], [2, 3, 1] and [2, 3, 3]
// along dimension 2 (the innermost axis) and validates the interleaved result
// of shape [2, 3, 6].
//
// ArmnnType/T select the data type under test; qScale/qOffset quantize the
// reference float values for quantized types (pass 0.0f/0 for Float32).
// useSubtensor is forwarded to Concatenate and selects whether the backend may
// implement the concatenation via sub-tensor views rather than copies.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    // Second input is narrower along dim 2 (width 1).
    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    // Third input is wider along dim 2 (width 3).
    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output width is the sum of the input widths: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,              // concatenation axis
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Each output row is input0's row followed by input1's and input2's rows.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5455
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005456LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5457 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5459 bool useSubtensor)
5460{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005461 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5462 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005463}
5464
// Shared driver for the 4D concatenation tests: concatenates three identical
// [1, 3, 2, 2] inputs along 'dimension' into a tensor described by
// outputTensorInfo. The expected output depends on the concatenation axis, so
// the caller fills result.outputExpected itself after this returns.
//
// qScale/qOffset quantize the reference float values for quantized types;
// useSubtensor is forwarded to Concatenate.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    // All three inputs share the same shape; only their values differ.
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    // outputExpected is intentionally left for the caller to populate.
    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
5521
// Concatenates the three fixed [1, 3, 2, 2] inputs from Concatenation4dTestImpl
// along dimension 0 (batch), producing a [3, 3, 2, 2] output: the inputs simply
// appear back-to-back in memory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected: input0, input1, input2 laid out consecutively.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5558
5559LayerTestResult<float, 4> Concatenation4dDim0Test(
5560 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005562{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005563 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005564}
5565
// Concatenates the three fixed [1, 3, 2, 2] inputs along dimension 1
// (channels), producing a [1, 9, 2, 2] output. With a batch size of 1 the
// element order is the same as for the dim-0 case: inputs back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5603
5604LayerTestResult<float, 4> Concatenation4dDim1Test(
5605 armnn::IWorkloadFactory& workloadFactory,
5606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5607{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005608 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005609}
5610
// Concatenates the three fixed [1, 3, 2, 2] inputs along dimension 2 (height),
// producing a [1, 3, 6, 2] output: for every channel, the two rows of each
// input appear in turn.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Channel 0: rows from input0, input1, input2.
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        // Channel 1.
        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        // Channel 2.
        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5648
5649LayerTestResult<float, 4> Concatenation4dDim2Test(
5650 armnn::IWorkloadFactory& workloadFactory,
5651 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5652{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005653 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005654}
5655
// Concatenates the three fixed [1, 3, 2, 2] inputs along dimension 3 (width),
// producing a [1, 3, 2, 6] output: each output row interleaves one row from
// every input. useSubtensor is forwarded to the shared driver.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Channel 0: row 0 of each input, then row 1 of each input.
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        // Channel 1.
        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        // Channel 2.
        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5694
5695LayerTestResult<float, 4> Concatenation4dDim3Test(
5696 armnn::IWorkloadFactory& workloadFactory,
5697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5698 bool useSubtensor)
5699{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005700 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5701 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005702}
5703
// Concatenates two inputs with different batch sizes — [1, 3, 2, 2] and
// [2, 3, 2, 2] — along dimension 0, producing a [3, 3, 2, 2] output where the
// inputs appear back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;  // concatenation axis (batch)
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has two batches.
    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    // Output batch count is 1 + 2 = 3.
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5783
5784LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5785 armnn::IWorkloadFactory& workloadFactory,
5786 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5787{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005788 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5789 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005790}
5791
// Concatenates two inputs with different channel counts — [1, 3, 2, 2] and
// [1, 2, 2, 2] — along dimension 1, producing a [1, 5, 2, 2] output. With a
// batch size of 1 the inputs appear back-to-back in memory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;  // concatenation axis (channels)
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has only two channels.
    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channel count is 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5852
5853LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5854 armnn::IWorkloadFactory& workloadFactory,
5855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5856{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005857 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5858 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005859}
5860
// Concatenates two inputs with different heights — [1, 3, 2, 2] and
// [1, 3, 3, 2] — along dimension 2, producing a [1, 3, 5, 2] output: per
// channel, input0's two rows are followed by input1's three rows.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;  // concatenation axis (height)
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input is one row taller.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Channel 0: input0 rows then input1 rows.
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        // Channel 1.
        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        // Channel 2.
        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5932
5933LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5934 armnn::IWorkloadFactory& workloadFactory,
5935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5936{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005937 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5938 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005939}
5940
// Concatenates two inputs with different widths — [1, 3, 2, 2] and
// [1, 3, 2, 3] — along dimension 3, producing a [1, 3, 2, 5] output: every
// output row is input0's pair followed by input1's triple. useSubtensor
// selects sub-tensor views versus copies in the backend.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;  // concatenation axis (width)
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input is one element wider per row.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output width is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
6001
6002LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
6003 armnn::IWorkloadFactory& workloadFactory,
6004 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6005 bool useSubtensor)
6006{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006007 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
6008 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006009}
6010
// Runs a FakeQuantization workload on a 3x2 Float32 tensor spanning [-10, 10]
// and checks that values are mapped onto the [0, 255] quantized-levels range
// (with the configured min/max of -10/10).
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({height, width },
        armnn::DataType::Float32);
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f,  -5.0f,
         0.0f,   5.0f,
        10.0f,  10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
    // Quantization range matches the extremes of the input data.
    float min = -10.f;
    float max = 10.f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;

    // NOTE(review): refData/refHandle redirect a copy of the descriptor's
    // output to ret.outputExpected, but no workload is created from refData in
    // this function — this looks like leftover scaffolding from a
    // reference-comparison pattern used elsewhere in the file; confirm before
    // removing.
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    // Expected mapping of [-10, 10] onto the 256 quantization levels.
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
        0.0f, 63.0f,
        128.0f, 191.0f,
        255.0f, 255.0f
    }));
    return ret;
}
6067
namespace
{

// Runs an L2Normalization workload over a 4D tensor and compares against the
// caller-supplied expected values.
//
// inputValues/expectedOutputValues are given in NCHW order; when 'layout' is
// NHWC both are permuted into NHWC before use, so callers provide NCHW data
// regardless of layout. scale/offset and outScale/outOffset are the input and
// output quantization parameters (ignored for Float32). epsilon is forwarded
// to the layer's m_Eps parameter (lower bound used by the normalization;
// default 1e-12f).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // At this point, if required, permute the input data into NHWC order.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) reference data for the type under test.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                        inputTensorInfo.GetQuantizationScale(),
                                        inputTensorInfo.GetQuantizationOffset(),
                                        inputData));

    // The expected output is permuted the same way as the input.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                             outputTensorInfo.GetQuantizationScale(),
                                             outputTensorInfo.GetQuantizationOffset(),
                                             expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / ||elements||_2, i.e. the reciprocal of the Euclidean norm of
// the given values — used by callers to build expected L2Normalization
// outputs by hand.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
6151
// Shared implementation for the 2D Pad tests: pads a 3x3 input with two
// elements on every side of both dimensions (output 7x7), filling the new
// elements with customPaddingValue, then runs the Pad workload and returns
// actual plus expected outputs for comparison by the caller.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> Pad2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue)
{
    const armnn::TensorShape inputShape{ 3, 3 };
    const armnn::TensorShape outputShape{ 7, 7 };

    // Input and output share the same quantization parameters.
    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
    QuantizedVector<T>(qScale, qOffset,
    {
      // Height (3) x Width (3)
      4, 8, 6,
      7, 4, 4,
      3, 2, 4
    }));

    // Short alias so the expected-output table below stays readable.
    auto p = customPaddingValue;
    std::vector<T> expectedOutputValues;
    expectedOutputValues = (
    QuantizedVector<T>(qScale, qOffset,
    {
      // The 3x3 input sits in the centre, surrounded by two rows/columns of
      // the padding value on every side.
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p,
      p, p, 4, 8, 6, p, p,
      p, p, 7, 4, 4, p, p,
      p, p, 3, 2, 4, p, p,
      p, p, p, p, p, p, p,
      p, p, p, p, p, p, p
    }));

    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding of two elements for each of the two dimensions.
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = padList;
    descriptor.m_Parameters.m_PadValue = customPaddingValue;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    // Configuration that depends on the now-allocated tensor memory must
    // happen before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006224
// Shared implementation for the 3D Pad tests: pads a 2x2x2 input with
// asymmetric zero padding — (0,1) on the channel dimension, (2,1) on height
// and (2,2) on width — producing a 3x5x6 output, then runs the Pad workload
// and returns actual plus expected outputs.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    // Input and output share the same quantization parameters.
    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0: input occupies height rows 2-3 and width columns 2-3.
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        // Channel 1: same placement as channel 0.
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        // Channel 2: entirely padding (one channel appended after the input).
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: channels, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    // Configuration that depends on the now-allocated tensor memory must
    // happen before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006309
// Shared implementation for the 4D Pad tests: pads a 2x2x3x2 input with
// asymmetric zero padding — (1,1) on batch, (2,1) on channel, (3,1) on height
// and (1,1) on width — producing a 4x5x7x4 output, then runs the Pad workload
// and returns actual plus expected outputs. In the expected table the input
// therefore occupies batches 1-2, channels 2-3, height rows 3-5 and width
// columns 1-2; everything else is zero padding.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 4> Pad4dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };

    // Input and output share the same quantization parameters.
    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        0, 1,
        2, 3,
        4, 5,

        // Batch 0, Channel 1, Height (3) x Width (2)
        6, 7,
        8, 9,
        10, 11,

        // Batch 1, Channel 0, Height (3) x Width (2)
        12, 13,
        14, 15,
        16, 17,

        // Batch 1, Channel 1, Height (3) x Width (2)
        18, 19,
        20, 21,
        22, 23
    }));

    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Batch 0, Channel 0 (all padding), Height (7) x Width (4)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 1 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 2 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 3 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 0, Channel 4 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 0 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 1 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 2: holds input Batch 0, Channel 0
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 1, 0,
        0, 2, 3, 0,
        0, 4, 5, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 3: holds input Batch 0, Channel 1
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 6, 7, 0,
        0, 8, 9, 0,
        0, 10, 11, 0,
        0, 0, 0, 0,

        // Batch 1, Channel 4 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 0 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 1 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 2: holds input Batch 1, Channel 0
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 12, 13, 0,
        0, 14, 15, 0,
        0, 16, 17, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 3: holds input Batch 1, Channel 1
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 18, 19, 0,
        0, 20, 21, 0,
        0, 22, 23, 0,
        0, 0, 0, 0,

        // Batch 2, Channel 4 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3, Channel 0 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3, Channel 1 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3, Channel 2 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3, Channel 3 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,

        // Batch 3, Channel 4 (all padding)
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0
    }));

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding per dimension: batch, channel, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    // Configuration that depends on the now-allocated tensor memory must
    // happen before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
6547
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006548LayerTestResult<uint8_t, 2> PadUint82dTest(
6549 armnn::IWorkloadFactory& workloadFactory,
6550 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006551{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006552 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006553}
6554
David Monahan34757812019-06-19 11:47:21 +01006555LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6556 armnn::IWorkloadFactory& workloadFactory,
6557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6558{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006559 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006560}
6561
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006562LayerTestResult<uint8_t, 3> PadUint83dTest(
6563 armnn::IWorkloadFactory& workloadFactory,
6564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006565{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006566 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006567}
6568
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006569LayerTestResult<uint8_t, 4> PadUint84dTest(
6570 armnn::IWorkloadFactory& workloadFactory,
6571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006572{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006573 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006574}
6575
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006576
// Explicit instantiations of the Pad*TestCommon templates for QuantisedSymm16
// so that translation units which only see the declarations can link against
// them (no QSymm16 wrapper functions exist in this file).
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);
6598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006599LayerTestResult<float, 2> PadFloat322dTest(
6600 armnn::IWorkloadFactory& workloadFactory,
6601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006602{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006603 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006604}
6605
David Monahan34757812019-06-19 11:47:21 +01006606LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6607 armnn::IWorkloadFactory& workloadFactory,
6608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6609{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006610 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006611}
6612
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006613LayerTestResult<float, 3> PadFloat323dTest(
6614 armnn::IWorkloadFactory& workloadFactory,
6615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006616{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006617 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006618}
6619
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006620LayerTestResult<float, 4> PadFloat324dTest(
6621 armnn::IWorkloadFactory& workloadFactory,
6622 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006623{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006624 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006625}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006626
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006627template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006628LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6629 armnn::IWorkloadFactory& workloadFactory,
6630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6631 float scale,
6632 int32_t offset,
6633 float outScale,
6634 int32_t outOffset,
6635 const armnn::DataLayout layout,
6636 float epsilon)
6637{
6638 // Width: 1
6639 // Height: 1
6640 // Channels: 3
6641 // BatchSize: 1
6642 unsigned int numberOfBatches = 1;
6643 unsigned int numberOfChannels = 3;
6644 unsigned int height = 1;
6645 unsigned int width = 1;
6646
6647 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6648 numberOfBatches, numberOfChannels, height, width, layout);
6649
6650 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6651 std::vector<float> inputValues
6652 {
6653 // Batch 0, Channel 0, Height (1) x Width (1)
6654 0.00000001f,
6655
6656 // Batch 0, Channel 1, Height (1) x Width (1)
6657 0.00000002f,
6658
6659 // Batch 0, Channel 2, Height (1) x Width (1)
6660 0.00000003f,
6661 };
6662
6663 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6664 std::vector<float> expectedOutputValues
6665 {
6666 // Batch 0, Channel 0, Height (1) x Width (1)
6667 0.00000001f * approxInvL2Norm,
6668 0.00000002f * approxInvL2Norm,
6669 0.00000003f * approxInvL2Norm,
6670 };
6671
6672 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6673 inputValues, outScale, outOffset, expectedOutputValues, layout,
6674 epsilon);
6675}
6676
6677
6678template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006679LayerTestResult<T, 4> L2Normalization1dTestCommon(
6680 armnn::IWorkloadFactory& workloadFactory,
6681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006682 float scale,
6683 int32_t offset,
6684 float outScale,
6685 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006686 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006687{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006688 // Width: 1
6689 // Height: 1
6690 // Channels: 10
6691 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006692 unsigned int numberOfBatches = 1;
6693 unsigned int numberOfChannels = 10;
6694 unsigned int height = 1;
6695 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006696
jimfly013aab7c32018-11-12 13:32:08 +00006697
Nina Drozdd41b2592018-11-19 13:03:36 +00006698 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006699 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006700 std::vector<float> inputValues
6701 {
6702 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006703 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006704
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006705 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006706 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006707
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006708 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006709 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006710
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006711 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006712 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006713
6714 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006715 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006716
6717 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006718 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006719
6720 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006721 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006722
6723 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006724 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006725
6726 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006727 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006728
6729 // Batch 0, Channel 9, Height (1) x Width (1)
6730 10.0f
6731 };
telsoa014fcda012018-03-09 14:13:49 +00006732 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006733 std::vector<float> expectedOutputValues
6734 {
6735 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006736 1.0f * approxInvL2Norm,
6737 2.0f * approxInvL2Norm,
6738 3.0f * approxInvL2Norm,
6739 4.0f * approxInvL2Norm,
6740 5.0f * approxInvL2Norm,
6741 6.0f * approxInvL2Norm,
6742 7.0f * approxInvL2Norm,
6743 8.0f * approxInvL2Norm,
6744 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006745 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006746 };
telsoa014fcda012018-03-09 14:13:49 +00006747
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006748
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006749 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6750 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006751}
6752
Ferran Balaguere52211e2019-06-17 12:23:52 +01006753LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6754 armnn::IWorkloadFactory& workloadFactory,
6755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6756 const armnn::DataLayout layout)
6757{
6758 // Dummy descriptor to get the default value of epsilon.
6759 armnn::L2NormalizationDescriptor descriptor;
6760
6761 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6762 layout, descriptor.m_Eps);
6763}
6764
6765LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6766 armnn::IWorkloadFactory& workloadFactory,
6767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6768 const armnn::DataLayout layout)
6769{
6770 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6771 layout, 1e-9f);
6772}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006773
6774LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006775 armnn::IWorkloadFactory& workloadFactory,
6776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006777 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006778{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006779 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006780}
6781
6782LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6783 armnn::IWorkloadFactory& workloadFactory,
6784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6785 const armnn::DataLayout layout)
6786{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006787 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006788 layout);
6789}
6790
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006791LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6792 armnn::IWorkloadFactory& workloadFactory,
6793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6794 const armnn::DataLayout layout)
6795{
6796 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6797 1.f/128, 128, layout);
6798}
6799
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006800template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6801LayerTestResult<T, 4> L2Normalization2dTestCommon(
6802 armnn::IWorkloadFactory& workloadFactory,
6803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006804 float scale,
6805 int32_t offset,
6806 float outScale,
6807 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006808 const armnn::DataLayout layout)
6809{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006810 // Width: 5
6811 // Height: 1
6812 // Channels: 2
6813 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006814 unsigned int numberOfBatches = 1;
6815 unsigned int numberOfChannels = 2;
6816 unsigned int height = 1;
6817 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006818
Nina Drozdd41b2592018-11-19 13:03:36 +00006819 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006820 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006821 std::vector<float> inputValues
6822 {
6823 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006824 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006825
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006826 // Batch 0, Channel 1, Height (1) x Width (5)
6827 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6828 };
6829 std::vector<float> expectedOutputValues
6830 {
6831 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006832 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6833 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6834 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6835 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6836 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006837
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006838 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006839 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6840 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6841 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6842 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006843 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006844 };
telsoa014fcda012018-03-09 14:13:49 +00006845
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006846 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6847 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006848}
telsoa014fcda012018-03-09 14:13:49 +00006849
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006850LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006851 armnn::IWorkloadFactory& workloadFactory,
6852 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006853 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006854{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006855 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6856 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006857}
6858
6859LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6860 armnn::IWorkloadFactory& workloadFactory,
6861 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6862 const armnn::DataLayout layout)
6863{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006864 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006865 layout);
6866}
6867
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006868LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6869 armnn::IWorkloadFactory& workloadFactory,
6870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6871 const armnn::DataLayout layout)
6872{
6873 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6874 1.f/128, 128, layout);
6875}
6876
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006877template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6878LayerTestResult<T, 4> L2Normalization3dTestCommon(
6879 armnn::IWorkloadFactory& workloadFactory,
6880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006881 float scale,
6882 int32_t offset,
6883 float outScale,
6884 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006885 const armnn::DataLayout layout)
6886{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006887 // Width: 3
6888 // Height: 4
6889 // Channels: 2
6890 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006891 unsigned int numberOfBatches = 1;
6892 unsigned int numberOfChannels = 2;
6893 unsigned int height = 4;
6894 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006895
Nina Drozdd41b2592018-11-19 13:03:36 +00006896 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006897 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006898 std::vector<float> inputValues
6899 {
6900 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006901 119.0f, 21.0f, 150.0f,
6902 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006903 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006904 147.0f, 199.0f, 220.0f,
6905
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006906 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006907 110.0f, 140.0f, 73.0f,
6908 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006909 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006910 162.0f, 12.0f, 161.0f
6911 };
6912 std::vector<float> expectedOutputValues
6913 {
6914 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006915 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006916 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006917 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6918 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006919 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006920 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006921 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006922 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6923 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6924 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6925 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6926 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6927
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006928 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006929 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6930 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006931 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006932 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6933 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006934 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6935 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006936 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6937 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6938 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006939 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006940 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6941 };
telsoa014fcda012018-03-09 14:13:49 +00006942
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006943 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6944 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006945}
telsoa014fcda012018-03-09 14:13:49 +00006946
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006947LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006948 armnn::IWorkloadFactory& workloadFactory,
6949 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006950 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006951{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006952 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6953 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006954}
6955
6956LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6957 armnn::IWorkloadFactory& workloadFactory,
6958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6959 const armnn::DataLayout layout)
6960{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006961 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006962 layout);
6963}
6964
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006965LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6966 armnn::IWorkloadFactory& workloadFactory,
6967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6968 const armnn::DataLayout layout)
6969{
6970 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6971 1.f/128, 128, layout);
6972}
6973
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006974template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6975LayerTestResult<T, 4> L2Normalization4dTestCommon(
6976 armnn::IWorkloadFactory& workloadFactory,
6977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006978 float scale,
6979 int32_t offset,
6980 float outScale,
6981 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006982 const armnn::DataLayout layout)
6983{
6984 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006985 // Height: 4
6986 // Channels: 3
6987 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006988 unsigned int numberOfBatches = 2;
6989 unsigned int numberOfChannels = 3;
6990 unsigned int height = 4;
6991 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006992
Nina Drozdd41b2592018-11-19 13:03:36 +00006993 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006994 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006995 std::vector<float> inputValues
6996 {
6997 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006998 235.0f, 46.0f, 178.0f,
6999 100.0f, 123.0f, 19.0f,
7000 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007001 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00007002
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007003 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007004 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007005 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00007006 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007007 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00007008
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007009 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007010 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00007011 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007012 12.0f, 209.0f, 200.0f,
7013 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00007014
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007015 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007016 67.0f, 90.0f, 49.0f,
7017 7.0f, 163.0f, 18.0f,
7018 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00007019 247.0f, 59.0f, 189.0f,
7020
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007021 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007022 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007023 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00007024 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007025 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00007026
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007027 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007028 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00007029 115.0f, 116.0f, 238.0f,
7030 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007031 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007032 };
7033 std::vector<float> expectedOutputValues
7034 {
7035 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007036 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007037 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007038 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7039 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
7040 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007041 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007042 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007043 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007044 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007045 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007046 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007047 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007048
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007049 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007050 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007051 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007052 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007053 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007054 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007055 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007056 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
7057 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7058 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007059 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7060 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7061 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007062
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007063 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007064 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007065 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
7066 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7067 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007068 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007069 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007070 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007071 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7072 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007073 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7074 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7075 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007076
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007077 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007078 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7079 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7080 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7081 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007082 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007083 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7084 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007085 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
7086 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7087 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007088 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007089 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
7090
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007091 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007092 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7093 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7094 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007095 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007096 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7097 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7098 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
7099 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007100 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7101 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007102 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007103 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007104
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007105 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007106 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007107 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7108 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7109 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
7110 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7111 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7112 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007113 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007114 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007115 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007116 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007117 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007118 };
telsoa014fcda012018-03-09 14:13:49 +00007119
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007120 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7121 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007122}
7123
7124LayerTestResult<float, 4> L2Normalization4dTest(
7125 armnn::IWorkloadFactory& workloadFactory,
7126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7127 const armnn::DataLayout layout)
7128{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007129 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7130 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007131}
7132
7133LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7134 armnn::IWorkloadFactory& workloadFactory,
7135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7136 const armnn::DataLayout layout)
7137{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007138 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007139 layout);
telsoa014fcda012018-03-09 14:13:49 +00007140}
7141
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007142LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7143 armnn::IWorkloadFactory& workloadFactory,
7144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7145 const armnn::DataLayout layout)
7146{
7147 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7148 1.f/128, 128, layout);
7149}
7150
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007151template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007152LayerTestResult<T, 4> ConstantTestImpl(
7153 armnn::IWorkloadFactory& workloadFactory,
7154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007155 float qScale,
7156 int32_t qOffset)
7157{
7158 constexpr unsigned int inputWidth = 3;
7159 constexpr unsigned int inputHeight = 4;
7160 constexpr unsigned int inputChannels = 3;
7161 constexpr unsigned int inputBatchSize = 2;
7162
7163 constexpr unsigned int outputWidth = inputWidth;
7164 constexpr unsigned int outputHeight = inputHeight;
7165 constexpr unsigned int outputChannels = inputChannels;
7166 constexpr unsigned int outputBatchSize = inputBatchSize;
7167
Nina Drozd58ef2c62019-05-16 12:09:18 +01007168 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7169 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007170
Nina Drozd58ef2c62019-05-16 12:09:18 +01007171 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7172 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007173
7174 // Set quantization parameters if the requested type is a quantized type.
7175 if(armnn::IsQuantizedType<T>())
7176 {
7177 inputTensorInfo.SetQuantizationScale(qScale);
7178 inputTensorInfo.SetQuantizationOffset(qOffset);
7179 outputTensorInfo.SetQuantizationScale(qScale);
7180 outputTensorInfo.SetQuantizationOffset(qOffset);
7181 }
7182
7183 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7184 QuantizedVector<T>(qScale, qOffset, {
7185 // Batch 0, Channel 0
7186 235.0f, 46.0f, 178.0f,
7187 100.0f, 123.0f, 19.0f,
7188 172.0f, 74.0f, 250.0f,
7189 6.0f, 195.0f, 80.0f,
7190
7191 // Batch 0, Channel 1
7192 113.0f, 95.0f, 202.0f,
7193 77.0f, 114.0f, 71.0f,
7194 122.0f, 246.0f, 166.0f,
7195 82.0f, 28.0f, 37.0f,
7196
7197 // Batch 0, Channel 2
7198 56.0f, 170.0f, 162.0f,
7199 194.0f, 89.0f, 254.0f,
7200 12.0f, 209.0f, 200.0f,
7201 1.0f, 64.0f, 54.0f,
7202
7203 // Batch 1, Channel 0
7204 67.0f, 90.0f, 49.0f,
7205 7.0f, 163.0f, 18.0f,
7206 25.0f, 117.0f, 103.0f,
7207 247.0f, 59.0f, 189.0f,
7208
7209 // Batch 1, Channel 1
7210 239.0f, 104.0f, 199.0f,
7211 17.0f, 124.0f, 153.0f,
7212 222.0f, 217.0f, 75.0f,
7213 32.0f, 126.0f, 21.0f,
7214
7215 // Batch 1, Channel 2
7216 97.0f, 145.0f, 215.0f,
7217 115.0f, 116.0f, 238.0f,
7218 226.0f, 16.0f, 132.0f,
7219 92.0f, 125.0f, 88.0f,
7220 })));
7221
7222 LayerTestResult<T, 4> result(outputTensorInfo);
7223 result.outputExpected = input;
7224
7225 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7226
7227 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7228 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7229
7230 armnn::ConstantQueueDescriptor descriptor;
7231 descriptor.m_LayerOutput = &constantTensor;
7232
7233 armnn::WorkloadInfo info;
7234 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7235
7236 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7237
7238 outputHandle->Allocate();
7239
Derek Lambertif30f7d32019-04-09 10:25:02 +01007240 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007241 workload->Execute();
7242
7243 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7244 return result;
7245}
7246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007247LayerTestResult<float, 4> ConstantTest(
7248 armnn::IWorkloadFactory& workloadFactory,
7249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007250{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007251 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007252}
7253
Nina Drozd58ef2c62019-05-16 12:09:18 +01007254LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7255 armnn::IWorkloadFactory& workloadFactory,
7256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7257{
7258 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7259}
7260
7261LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007262 armnn::IWorkloadFactory& workloadFactory,
7263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007264{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007265 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007266}
7267
Jim Flynn4ed6c832019-05-20 11:02:46 +01007268LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007269 armnn::IWorkloadFactory& workloadFactory,
7270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7271{
7272 unsigned int outputWidth = 3;
7273 unsigned int outputHeight = 6;
7274 unsigned int outputChannels = 3;
7275
7276 unsigned int inputWidth1 = 3;
7277 unsigned int inputHeight1 = 6;
7278 unsigned int inputChannels1 = 2;
7279
7280 unsigned int inputWidth2 = 3;
7281 unsigned int inputHeight2 = 6;
7282 unsigned int inputChannels2 = 1;
7283
7284 // Defines the tensor descriptors.
7285 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7286 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7287 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7288
7289 // Quantized input1 tensor. Range [-3, 1]
7290 const float inputScale1 = 0.015686f;
7291 const int32_t inputOffset1 = 192;
7292
7293 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7294 {
7295 1, 2, 3,
7296 4, 5, 6,
7297 7, 8, 9,
7298 10, 11, 12,
7299 13, 14, 15,
7300 16, 17, 18,
7301
7302 19, 20, 21,
7303 22, 23, 24,
7304 25, 26, 27,
7305 28, 29, 30,
7306 31, 32, 33,
7307 34, 35, 36,
7308 })
7309 );
7310
7311 // Quatized input2 tensor. Range [-1, 4]
7312 const float inputScale2 = 0.019608f;
7313 const int32_t inputOffset2 = 50;
7314
7315 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7316 {
7317 37, 38, 39,
7318 40, 41, 42,
7319 43, 44, 45,
7320 46, 47, 48,
7321 49, 50, 51,
7322 52, 53, 54,
7323 })
7324 );
7325
7326 // Output has the same quantization parameters than input1,
7327 // so that only the requantization of input2 is required
7328 const float outputScale = 0.015686f;
7329 const int32_t outputOffset = 192;
7330
7331 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7332
7333 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7334 {
7335 1, 2, 3,
7336 4, 5, 6,
7337 7, 8, 9,
7338 10, 11, 12,
7339 13, 14, 15,
7340 16, 17, 18,
7341
7342 19, 20, 21,
7343 22, 23, 24,
7344 25, 26, 27,
7345 28, 29, 30,
7346 31, 32, 33,
7347 34, 35, 36,
7348
7349 176, 177, 178,
7350 179, 181, 182,
7351 183, 184, 186,
7352 187, 188, 189,
7353 191, 192, 193,
7354 195, 196, 197,
7355 })
7356 );
7357
7358 outputTensorInfo.SetQuantizationScale(outputScale);
7359 outputTensorInfo.SetQuantizationOffset(outputOffset);
7360 inputTensorInfo1.SetQuantizationScale(inputScale1);
7361 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7362 inputTensorInfo2.SetQuantizationScale(inputScale2);
7363 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7364
7365 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007366 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007367
7368 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007369 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007370
7371 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7372
7373 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7374
7375 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7376 subTensorsSupported ?
7377 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7378 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7379
7380 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7381 subTensorsSupported ?
7382 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7383 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7384
Jim Flynne242f2d2019-05-22 14:24:13 +01007385 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007386 armnn::WorkloadInfo info;
7387 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7388 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7389 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7390
7391 data.m_ViewOrigins.push_back(window1);
7392 data.m_ViewOrigins.push_back(window2);
7393
Jim Flynn4ed6c832019-05-20 11:02:46 +01007394 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007395
7396 inputHandle1->Allocate();
7397 inputHandle2->Allocate();
7398 outputHandle->Allocate();
7399
7400 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7401 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7402
Derek Lambertif30f7d32019-04-09 10:25:02 +01007403 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007404 workload->Execute();
7405
7406 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7407
7408 return ret;
7409}
7410
Jim Flynn4ed6c832019-05-20 11:02:46 +01007411LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007412 armnn::IWorkloadFactory& workloadFactory,
7413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007414{
surmeh013537c2c2018-05-18 16:31:43 +01007415 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007416 unsigned int outputHeight = 6;
7417 unsigned int outputChannels = 3;
7418
surmeh013537c2c2018-05-18 16:31:43 +01007419 unsigned int inputWidth1 = 3;
7420 unsigned int inputHeight1 = 6;
7421 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007422
surmeh013537c2c2018-05-18 16:31:43 +01007423 unsigned int inputWidth2 = 3;
7424 unsigned int inputHeight2 = 6;
7425 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007426
telsoa01c577f2c2018-08-31 09:22:23 +01007427 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007428 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7429 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7430 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007431
Jim Flynn4ed6c832019-05-20 11:02:46 +01007432 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007433 const float scale = 0.13497836f;
7434 const int32_t offset = -7;
7435
7436 outputTensorInfo.SetQuantizationScale(scale);
7437 outputTensorInfo.SetQuantizationOffset(offset);
7438 inputTensorInfo1.SetQuantizationScale(scale);
7439 inputTensorInfo1.SetQuantizationOffset(offset);
7440 inputTensorInfo2.SetQuantizationScale(scale);
7441 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007442
7443 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7444
7445 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007446 {
7447 1, 2, 3,
7448 4, 5, 6,
7449 7, 8, 9,
7450 10, 11, 12,
7451 13, 14, 15,
7452 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007453
surmeh013537c2c2018-05-18 16:31:43 +01007454 19, 20, 21,
7455 22, 23, 24,
7456 25, 26, 27,
7457 28, 29, 30,
7458 31, 32, 33,
7459 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007460
surmeh013537c2c2018-05-18 16:31:43 +01007461 37, 38, 39,
7462 40, 41, 42,
7463 43, 44, 45,
7464 46, 47, 48,
7465 49, 50, 51,
7466 52, 53, 54,
7467 })
telsoa014fcda012018-03-09 14:13:49 +00007468 );
7469
telsoa014fcda012018-03-09 14:13:49 +00007470 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7471 {
surmeh013537c2c2018-05-18 16:31:43 +01007472 1, 2, 3,
7473 4, 5, 6,
7474 7, 8, 9,
7475 10, 11, 12,
7476 13, 14, 15,
7477 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007478
surmeh013537c2c2018-05-18 16:31:43 +01007479 19, 20, 21,
7480 22, 23, 24,
7481 25, 26, 27,
7482 28, 29, 30,
7483 31, 32, 33,
7484 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007485 })
7486 );
7487
7488 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7489 {
surmeh013537c2c2018-05-18 16:31:43 +01007490 37, 38, 39,
7491 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00007492 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01007493 46, 47, 48,
7494 49, 50, 51,
7495 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00007496 })
7497 );
7498
telsoa01c577f2c2018-08-31 09:22:23 +01007499 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007500 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00007501
telsoa01c577f2c2018-08-31 09:22:23 +01007502 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007503 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00007504
telsoa014fcda012018-03-09 14:13:49 +00007505
7506 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7507
7508 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7509
7510 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7511 subTensorsSupported ?
7512 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7513 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7514
7515 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7516 subTensorsSupported ?
7517 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7518 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7519
telsoa014fcda012018-03-09 14:13:49 +00007520
Jim Flynne242f2d2019-05-22 14:24:13 +01007521 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00007522 armnn::WorkloadInfo info;
7523 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7524 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00007525 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7526
7527 data.m_ViewOrigins.push_back(window1);
7528 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00007529
Jim Flynn4ed6c832019-05-20 11:02:46 +01007530 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00007531
7532 inputHandle1->Allocate();
7533 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007534 outputHandle->Allocate();
7535
7536 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7537 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007538
Derek Lambertif30f7d32019-04-09 10:25:02 +01007539 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007540 workload->Execute();
7541
7542 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7543
7544 return ret;
7545}
7546
Jim Flynn4ed6c832019-05-20 11:02:46 +01007547LayerTestResult<uint16_t, 3> ConcatUint16Test(
Jim Flynncbb66aa2019-05-15 13:03:54 +01007548 armnn::IWorkloadFactory& workloadFactory,
7549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7550{
7551 unsigned int outputWidth = 3;
7552 unsigned int outputHeight = 6;
7553 unsigned int outputChannels = 3;
7554
7555 unsigned int inputWidth1 = 3;
7556 unsigned int inputHeight1 = 6;
7557 unsigned int inputChannels1 = 2;
7558
7559 unsigned int inputWidth2 = 3;
7560 unsigned int inputHeight2 = 6;
7561 unsigned int inputChannels2 = 1;
7562
7563 // Defines the tensor descriptors.
7564 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
7565 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
7566 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
7567
Jim Flynn4ed6c832019-05-20 11:02:46 +01007568 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
Jim Flynncbb66aa2019-05-15 13:03:54 +01007569 const float scale = 0.13497836f;
7570 const int32_t offset = -7;
7571
7572 outputTensorInfo.SetQuantizationScale(scale);
7573 outputTensorInfo.SetQuantizationOffset(offset);
7574 inputTensorInfo1.SetQuantizationScale(scale);
7575 inputTensorInfo1.SetQuantizationOffset(offset);
7576 inputTensorInfo2.SetQuantizationScale(scale);
7577 inputTensorInfo2.SetQuantizationOffset(offset);
7578
7579 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
7580
7581 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
7582 {
7583 1, 2, 3,
7584 4, 5, 6,
7585 7, 8, 9,
7586 10, 11, 12,
7587 13, 14, 15,
7588 16, 17, 18,
7589
7590 19, 20, 21,
7591 22, 23, 24,
7592 25, 26, 27,
7593 28, 29, 30,
7594 31, 32, 33,
7595 34, 35, 36,
7596
7597 37, 38, 39,
7598 40, 41, 42,
7599 43, 44, 45,
7600 46, 47, 48,
7601 49, 50, 51,
7602 52, 53, 54,
7603 }));
7604
7605 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
7606 {
7607 1, 2, 3,
7608 4, 5, 6,
7609 7, 8, 9,
7610 10, 11, 12,
7611 13, 14, 15,
7612 16, 17, 18,
7613
7614 19, 20, 21,
7615 22, 23, 24,
7616 25, 26, 27,
7617 28, 29, 30,
7618 31, 32, 33,
7619 34, 35, 36,
7620 }));
7621
7622 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
7623 {
7624 37, 38, 39,
7625 40, 41, 42,
7626 43, 44, 45,
7627 46, 47, 48,
7628 49, 50, 51,
7629 52, 53, 54,
7630 }));
7631
7632 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007633 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007634
7635 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007636 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007637
7638
7639 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7640
7641 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7642
7643 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7644 subTensorsSupported ?
7645 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7646 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7647
7648 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7649 subTensorsSupported ?
7650 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7651 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7652
7653
Jim Flynne242f2d2019-05-22 14:24:13 +01007654 armnn::ConcatQueueDescriptor data;
Jim Flynncbb66aa2019-05-15 13:03:54 +01007655 armnn::WorkloadInfo info;
7656 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7657 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7658 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7659
7660 data.m_ViewOrigins.push_back(window1);
7661 data.m_ViewOrigins.push_back(window2);
7662
Jim Flynn4ed6c832019-05-20 11:02:46 +01007663 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Jim Flynncbb66aa2019-05-15 13:03:54 +01007664
7665 inputHandle1->Allocate();
7666 inputHandle2->Allocate();
7667 outputHandle->Allocate();
7668
7669 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7670 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7671
7672 workload->PostAllocationConfigure();
7673 workload->Execute();
7674
7675 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7676
7677 return ret;
7678}
telsoa014fcda012018-03-09 14:13:49 +00007679
surmeh01bceff2f2018-03-29 16:29:27 +01007680namespace
telsoa014fcda012018-03-09 14:13:49 +00007681{
Sadik Armagan2999a022019-04-09 14:20:12 +01007682template <typename T>
7683LayerTestResult<T, 4> AdditionQuantizeTestHelper(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007684 armnn::IWorkloadFactory& workloadFactory,
7685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7686 const unsigned int shape0[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007687 const std::vector<T>& values0,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007688 float scale0,
7689 int32_t offset0,
7690 const unsigned int shape1[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007691 const std::vector<T> & values1,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007692 float scale1,
7693 int32_t offset1,
7694 const unsigned int outShape[4],
Sadik Armagan2999a022019-04-09 14:20:12 +01007695 const std::vector<T> & outValues,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007696 float outScale,
7697 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01007698{
Sadik Armagan2999a022019-04-09 14:20:12 +01007699 auto dataType = (std::is_same<T, uint8_t>::value ?
7700 armnn::DataType::QuantisedAsymm8 :
7701 armnn::DataType::QuantisedSymm16);
7702
7703 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
7704 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
7705 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
telsoa014fcda012018-03-09 14:13:49 +00007706
surmeh01bceff2f2018-03-29 16:29:27 +01007707 inputTensorInfo0.SetQuantizationScale(scale0);
7708 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00007709
surmeh01bceff2f2018-03-29 16:29:27 +01007710 inputTensorInfo1.SetQuantizationScale(scale1);
7711 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00007712
surmeh01bceff2f2018-03-29 16:29:27 +01007713 outputTensorInfo.SetQuantizationScale(outScale);
7714 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00007715
Sadik Armagan2999a022019-04-09 14:20:12 +01007716 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
7717 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00007718
Sadik Armagan2999a022019-04-09 14:20:12 +01007719 LayerTestResult<T, 4> result(outputTensorInfo);
7720 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
7721
7722 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
7723 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
7724 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7725
7726 armnn::AdditionQueueDescriptor data;
7727 armnn::WorkloadInfo info;
7728 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
7729 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7730 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7731
7732 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
7733
7734 inputHandle0->Allocate();
7735 inputHandle1->Allocate();
7736 outputHandle->Allocate();
7737
7738 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
7739 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
7740
Derek Lambertif30f7d32019-04-09 10:25:02 +01007741 workload->PostAllocationConfigure();
Sadik Armagan2999a022019-04-09 14:20:12 +01007742 workload->Execute();
7743
7744 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7745
7746 return result;
7747}
7748} // anonymous namespace
7749
7750LayerTestResult<uint8_t, 4> AdditionUint8Test(
7751 armnn::IWorkloadFactory& workloadFactory,
7752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7753{
7754 const unsigned int shape0[] = { 1, 2, 2, 3 };
7755 const unsigned int shape1[] = { 1, 2, 2, 3 };
7756
7757 std::vector<uint8_t> input0(
7758 {
7759 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7760 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7761 });
7762
7763 std::vector<uint8_t> input1(
7764 {
7765 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7766 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7767 });
7768
7769 std::vector<uint8_t> output(
7770 {
7771 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7772 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7773 });
7774
7775 return AdditionQuantizeTestHelper(workloadFactory,
7776 memoryManager,
7777 shape0, input0, 7.0f, 3,
7778 shape1, input1, 7.0f, 3,
7779 shape0, output, 7.0f, 3);
7780}
7781
// Element-wise addition of two QuantisedSymm16 tensors of identical shape.
// With scale 7 and offset 0 on all tensors, every output is simply the sum of
// the raw input values; nothing comes close to the int16 limits, so no
// clamping occurs in this test.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    // Dequantised values (raw * 7) are shown to the right.
    std::vector<int16_t> input0(
        {
            63,  35,  77,  70,  56,  112, //  441,  245,  539,  490,  392,  784
          203,  28, 252, 168, 245,  91    // 1421,  196, 1764, 1176, 1715,  637
        });

    std::vector<int16_t> input1(
        {
            21,   7, 175, 231, 175, 210, //  147,   49, 1225, 1617, 1225, 1470
          126, 161,  63,  21, 105, 126   //  882, 1127,  441,  147,  735,  882
        });

    // Raw outputs are exact element-wise sums of the raw inputs (same scale, zero offset).
    std::vector<int16_t> output(
        {
            84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
          329, 189, 315, 189, 350, 217,  // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7813
namespace
{
// Builds and executes a quantized Multiplication workload for two 4-D tensors
// and returns the actual output together with the supplied expected output.
// ArmnnType selects the quantized data type; T is the matching C++ element type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters so the workload
    // dequantizes, multiplies, and requantizes.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocation must precede the data copies below.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7879
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007880LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7881 armnn::IWorkloadFactory& workloadFactory,
7882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007883{
7884 unsigned int batchSize = 1;
7885 unsigned int channels = 2;
7886 unsigned int height = 2;
7887 unsigned int width = 3;
7888 const unsigned int shape[] = { batchSize, channels, height, width };
7889
telsoa01c577f2c2018-08-31 09:22:23 +01007890 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007891 std::vector<uint8_t> input0({
7892 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7893 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7894 });
7895
telsoa01c577f2c2018-08-31 09:22:23 +01007896 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007897 std::vector<uint8_t> input1({
7898 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7899 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7900 });
7901
telsoa01c577f2c2018-08-31 09:22:23 +01007902 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007903 std::vector<uint8_t> output(
7904 {
7905 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7906 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7907 });
7908
Sadik Armagan2999a022019-04-09 14:20:12 +01007909 // Scale/offset chosen to have output values out of range.
7910 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7911 memoryManager,
7912 shape,
7913 input0,
7914 4.0f,
7915 1,
7916 shape,
7917 input1,
7918 3.0f,
7919 -2,
7920 shape,
7921 output,
7922 1366.255f,
7923 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007924}
7925
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007926LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7927 armnn::IWorkloadFactory& workloadFactory,
7928 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007929{
7930 const unsigned int shape0[] = { 1, 2, 2, 3 };
7931 const unsigned int shape1[] = { 1, 1, 1, 1 };
7932
7933 std::vector<uint8_t> input0({
7934 1, 2, 3, 4, 5, 6,
7935 7, 8, 9, 10, 11, 12
7936 });
7937
7938 std::vector<uint8_t> input1({2});
7939
7940 std::vector<uint8_t> output({
7941 2, 4, 6, 8, 10, 12,
7942 14, 16, 18, 20, 22, 24
7943 });
7944
Sadik Armagan2999a022019-04-09 14:20:12 +01007945 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7946 memoryManager,
7947 shape0,
7948 input0,
7949 1.0f,
7950 0,
7951 shape1,
7952 input1,
7953 1.0f,
7954 0,
7955 shape0,
7956 output,
7957 1.0f,
7958 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007959}
7960
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007961LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7962 armnn::IWorkloadFactory& workloadFactory,
7963 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007964{
7965 const unsigned int shape0[] = { 1, 2, 2, 3 };
7966 const unsigned int shape1[] = { 1, 1, 1, 3 };
7967
7968 std::vector<uint8_t> input0({
7969 1, 2, 3, 4, 5, 6,
7970 7, 8, 9, 10, 11, 12
7971 });
7972
7973 std::vector<uint8_t> input1({1, 2, 3});
7974
7975 std::vector<uint8_t> output({
7976 1, 4, 9, 4, 10, 18,
7977 7, 16, 27, 10, 22, 36
7978 });
7979
Sadik Armagan2999a022019-04-09 14:20:12 +01007980 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7981 memoryManager,
7982 shape0,
7983 input0,
7984 1.0f,
7985 0,
7986 shape1,
7987 input1,
7988 1.0f,
7989 0,
7990 shape0,
7991 output,
7992 1.0f,
7993 0);
7994}
7995
7996LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7997 armnn::IWorkloadFactory& workloadFactory,
7998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7999{
8000 const unsigned int shape[] = { 1, 2, 2, 3 };
8001
8002 std::vector<int16_t> input0(
8003 {
8004 6, 7, 8, 9, 10, 11,
8005 12, 13, 14, 15, 16, 17
8006 });
8007
8008 std::vector<int16_t> input1(
8009 {
8010 1, 2, 3, 4, 5, 6,
8011 7, 8, 9, 10, 11, 12
8012 });
8013
8014 std::vector<int16_t> output(
8015 {
8016 6, 14, 24, 36, 50, 66,
8017 84, 104, 126, 150, 176, 204
8018 });
8019
8020 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8021 memoryManager,
8022 shape,
8023 input0,
8024 1.0f,
8025 0,
8026 shape,
8027 input1,
8028 1.0f,
8029 0,
8030 shape,
8031 output,
8032 1.0f,
8033 0);
8034}
8035
8036LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8037 armnn::IWorkloadFactory& workloadFactory,
8038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8039{
8040 const unsigned int shape0[] = { 1, 2, 2, 3 };
8041 const unsigned int shape1[] = { 1, 1, 1, 1 };
8042
8043 std::vector<int16_t> input0(
8044 {
8045 1, 2, 3, 4, 5, 6,
8046 7, 8, 9, 10, 11, 12
8047 });
8048
8049 std::vector<int16_t> input1({2});
8050
8051 std::vector<int16_t> output(
8052 {
8053 2, 4, 6, 8, 10, 12,
8054 14, 16, 18, 20, 22, 24
8055 });
8056
8057 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8058 memoryManager,
8059 shape0,
8060 input0,
8061 1.0f,
8062 0,
8063 shape1,
8064 input1,
8065 1.0f,
8066 0,
8067 shape0,
8068 output,
8069 1.0f,
8070 0);
8071}
8072
8073LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8074 armnn::IWorkloadFactory& workloadFactory,
8075 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8076{
8077 const unsigned int shape0[] = { 1, 2, 2, 3 };
8078 const unsigned int shape1[] = { 1, 1, 1, 3 };
8079
8080 std::vector<int16_t> input0(
8081 {
8082 1, 2, 3, 4, 5, 6,
8083 7, 8, 9, 10, 11, 12
8084 });
8085
8086 std::vector<int16_t> input1({1, 2, 3});
8087
8088 std::vector<int16_t> output(
8089 {
8090 1, 4, 9, 4, 10, 18,
8091 7, 16, 27, 10, 22, 36
8092 });
8093
8094 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8095 memoryManager,
8096 shape0,
8097 input0,
8098 1.0f,
8099 0,
8100 shape1,
8101 input1,
8102 1.0f,
8103 0,
8104 shape0,
8105 output,
8106 1.0f,
8107 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008108}
telsoa014fcda012018-03-09 14:13:49 +00008109
David Beckf195f032018-09-06 16:46:34 +01008110namespace
8111{
Sadik Armagan2999a022019-04-09 14:20:12 +01008112template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008113LayerTestResult<T, 4> SubtractionTestHelper(
8114 armnn::IWorkloadFactory& workloadFactory,
8115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8116 const unsigned int shape0[4],
8117 const std::vector<T>& values0,
8118 float scale0,
8119 int32_t offset0,
8120 const unsigned int shape1[4],
8121 const std::vector<T> & values1,
8122 float scale1,
8123 int32_t offset1,
8124 const unsigned int outShape[4],
8125 const std::vector<T> & outValues,
8126 float outScale,
8127 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01008128{
Sadik Armagan2999a022019-04-09 14:20:12 +01008129 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
8130 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
8131 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
David Beckf195f032018-09-06 16:46:34 +01008132
8133 inputTensorInfo0.SetQuantizationScale(scale0);
8134 inputTensorInfo0.SetQuantizationOffset(offset0);
8135
8136 inputTensorInfo1.SetQuantizationScale(scale1);
8137 inputTensorInfo1.SetQuantizationOffset(offset1);
8138
8139 outputTensorInfo.SetQuantizationScale(outScale);
8140 outputTensorInfo.SetQuantizationOffset(outOffset);
8141
8142 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
8143 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
8144
8145 LayerTestResult<T, 4> result(outputTensorInfo);
8146 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
8147
8148 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
8149 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
8150 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8151
8152 armnn::SubtractionQueueDescriptor data;
8153 armnn::WorkloadInfo info;
8154 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
8155 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
8156 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
8157
8158 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
8159
8160 inputHandle0->Allocate();
8161 inputHandle1->Allocate();
8162 outputHandle->Allocate();
8163
8164 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
8165 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
8166
Derek Lambertif30f7d32019-04-09 10:25:02 +01008167 workload->PostAllocationConfigure();
David Beckf195f032018-09-06 16:46:34 +01008168 workload->Execute();
8169
8170 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
8171
8172 return result;
8173}
8174} // anonymous namespace
8175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008176LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8177 armnn::IWorkloadFactory& workloadFactory,
8178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008179{
8180 const unsigned int shape0[] = { 1, 1, 2, 2 };
8181 const unsigned int shape1[] = { 1, 1, 2, 2 };
8182
8183 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8184 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8185 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8186
Sadik Armagan2999a022019-04-09 14:20:12 +01008187 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8188 memoryManager,
8189 shape0, input0, 0.5f, 2,
8190 shape1, input1, 1.0f, 0,
8191 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008192}
8193
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008194LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8195 armnn::IWorkloadFactory& workloadFactory,
8196 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008197{
8198 const unsigned int shape0[] = { 1, 1, 2, 2 };
8199 const unsigned int shape1[] = { 1, 1, 1, 1 };
8200
8201 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8202 std::vector<uint8_t> input1({ 2 });
8203 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8204
Sadik Armagan2999a022019-04-09 14:20:12 +01008205 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8206 memoryManager,
8207 shape0, input0, 0.5f, 2,
8208 shape1, input1, 1.0f, 0,
8209 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008210}
8211
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008212LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
8213 armnn::IWorkloadFactory& workloadFactory,
8214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008215{
8216 const unsigned int shape0[] = { 1, 1, 2, 2 };
8217 const unsigned int shape1[] = { 1, 1, 2, 1 };
8218
8219 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8220 std::vector<uint8_t> input1({ 2, 1 });
8221 std::vector<uint8_t> output({ 8, 11, 12, 15 });
8222
Sadik Armagan2999a022019-04-09 14:20:12 +01008223 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8224 memoryManager,
8225 shape0, input0, 1.0f, 0,
8226 shape1, input1, 1.0f, 0,
8227 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008228}
8229
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008230LayerTestResult<float, 4> SubtractionTest(
8231 armnn::IWorkloadFactory& workloadFactory,
8232 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008233{
8234 const unsigned int shape0[] = { 1, 1, 2, 2 };
8235 const unsigned int shape1[] = { 1, 1, 2, 2 };
8236
8237 std::vector<float> input0({ 1, 2, 3, 4 });
8238 std::vector<float> input1({ 1, -1, 0, 2 });
8239 std::vector<float> output({ 0, 3, 3, 2 });
8240
Sadik Armagan2999a022019-04-09 14:20:12 +01008241 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8242 memoryManager,
8243 shape0, input0, 1.0f, 0,
8244 shape1, input1, 1.0f, 0,
8245 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008246}
8247
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008248LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8249 armnn::IWorkloadFactory& workloadFactory,
8250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008251{
8252 const unsigned int shape0[] = { 1, 1, 2, 2 };
8253 const unsigned int shape1[] = { 1, 1, 1, 1 };
8254
8255 std::vector<float> input0({ 1, 2, 3, 4 });
8256 std::vector<float> input1({ 10 });
8257 std::vector<float> output({ -9, -8, -7, -6 });
8258
Sadik Armagan2999a022019-04-09 14:20:12 +01008259 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8260 memoryManager,
8261 shape0, input0, 1.0f, 0,
8262 shape1, input1, 1.0f, 0,
8263 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008264}
8265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008266LayerTestResult<float, 4> SubtractionBroadcastTest(
8267 armnn::IWorkloadFactory& workloadFactory,
8268 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008269{
8270 const unsigned int shape0[] = { 1, 1, 2, 2 };
8271 const unsigned int shape1[] = { 1, 1, 1, 2 };
8272
8273 std::vector<float> input0({ 1, 2, 3, 4 });
8274 std::vector<float> input1({ 10, -5 });
8275 std::vector<float> output({ -9, 7, -7, 9 });
8276
Sadik Armagan2999a022019-04-09 14:20:12 +01008277 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8278 memoryManager,
8279 shape0, input0, 1.0f, 0,
8280 shape1, input1, 1.0f, 0,
8281 shape0, output, 1.0f, 0);
8282}
8283
// Quantized (symmetric int16) element-wise subtraction; input0 uses scale 0.5,
// the other tensors scale 1.0, all with zero offset.
LayerTestResult<int16_t, 4> SubtractionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 2 };

    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 1, 2, 1, 2 });
    // NOTE(review): with scale 0.5 and offset 0 the dequantized minuend is
    // { 5, 6, 7, 8 }, so dequantize-subtract-quantize gives { 4, 4, 6, 6 }.
    // The expected { 3, 3, 5, 5 } matches the asymmetric-uint8 variant of this
    // test (which uses offset 2 on input0); the sibling
    // SubtractionBroadcast1ElementInt16Test below IS consistent with offset 0.
    // Verify these expected values against the reference backend.
    std::vector<int16_t> output({ 3, 3, 5, 5 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 0.5f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8301
// Quantized (symmetric int16) subtraction with a single-element broadcast operand.
// input0 dequantizes (scale 0.5, offset 0) to { 5, 6, 7, 8 }; subtracting 2 gives
// { 3, 4, 5, 6 }, which matches the expected output at scale 1.0.
LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 2 });
    std::vector<int16_t> output({ 3, 4, 5, 6 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 0.5f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8319
// Quantized (symmetric int16) subtraction with a broadcast second operand.
// All tensors use scale 1.0 and offset 0.
LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<int16_t> input0({ 10, 12, 14, 16 });
    std::vector<int16_t> input1({ 2, 1 });
    // NOTE(review): as in SubtractionBroadcastUint8Test, the expected values
    // { 8, 11, 12, 15 } correspond to a width-axis broadcast (shape { 1, 1, 1, 2 });
    // a { 1, 1, 2, 1 } per-row broadcast would give { 8, 10, 13, 15 } — confirm.
    std::vector<int16_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 1.0f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8337
// Float32 batch normalization over a { 1, 2, 3, 2 } NCHW tensor.
// The normalization constants (mean/variance/beta/gamma) are fixed inside
// BatchNormTestImpl; with those constants channel 0 passes through unchanged
// while channel 1 is remapped (compare the two value tables below).
// The trailing 0.f / 0 are quantization scale/offset arguments, forwarded to the
// helper (not meaningful for Float32 data).
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NCHW);
}
8378
// Float32 batch normalization over a { 1, 3, 2, 2 } NHWC tensor — the same data
// as BatchNormTest, re-laid-out channel-last, with matching expected values.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Quantization scale/offset (0.f, 0) are forwarded but not meaningful for Float32.
    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        0.f, 0, armnn::DataLayout::NHWC);
}
8423
// Asymmetric-uint8 batch normalization over a { 1, 2, 3, 2 } NCHW tensor.
// Input/expected values are specified as floats; BatchNormTestImpl applies the
// quantization parameters (scale 1/20, offset 50) when building the tensors.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8464
// Asymmetric-uint8 batch normalization over a { 1, 3, 2, 2 } NHWC tensor —
// NHWC re-layout of BatchNormUint8Test with the same quantization (scale 1/20,
// offset 50).
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
            (workloadFactory, memoryManager,
             inputOutputShape, inputValues, expectedOutputValues,
             1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8509
// Symmetric-int16 batch normalization over a { 1, 2, 3, 2 } NCHW tensor; same
// float data tables as the Float32/uint8 variants.
// NOTE(review): QuantisedSymm16 is a zero-offset scheme, yet offset 50 is passed
// (mirroring the uint8 test) — verify that BatchNormTestImpl ignores the offset
// for symmetric data types.
LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NCHW);
}
8550
// Symmetric-int16 batch normalization over a { 1, 3, 2, 2 } NHWC tensor —
// NHWC re-layout of BatchNormInt16Test.
// NOTE(review): offset 50 with the zero-offset QuantisedSymm16 scheme — see the
// note on BatchNormInt16Test; verify the helper's handling.
LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
            (workloadFactory, memoryManager,
             inputOutputShape, inputValues, expectedOutputValues,
             1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8595
Nina Drozd58ef2c62019-05-16 12:09:18 +01008596LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008597 armnn::IWorkloadFactory& workloadFactory,
8598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008599{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008600 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008601}
8602
Nina Drozd58ef2c62019-05-16 12:09:18 +01008603LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8604 armnn::IWorkloadFactory& workloadFactory,
8605 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8606{
8607 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8608}
8609
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008610LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8611 armnn::IWorkloadFactory& workloadFactory,
8612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008613{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008614 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008615}
8616
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008617LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8618 armnn::IWorkloadFactory& workloadFactory,
8619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008621 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008622}
8623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008624LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8625 armnn::IWorkloadFactory& workloadFactory,
8626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008627{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008628 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008629}
8630
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008631LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8632 armnn::IWorkloadFactory& workloadFactory,
8633 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008634{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008635 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8636 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008637}
8638
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008639LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8640 armnn::IWorkloadFactory& workloadFactory,
8641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008643 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8644 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008645}
8646
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008647LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8648 armnn::IWorkloadFactory& workloadFactory,
8649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008650{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008651 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008652}
8653
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008654LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8655 armnn::IWorkloadFactory& workloadFactory,
8656 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008657{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008658 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008659}
8660
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008661LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8662 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8664 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008665{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008666 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8667 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008668}
8669
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008670LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8671 armnn::IWorkloadFactory& workloadFactory,
8672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008673{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008674 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008675}
8676
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008677LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8678 armnn::IWorkloadFactory& workloadFactory,
8679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008680{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008681 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8682 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008683}
8684
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008685LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8686 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8688 bool useSubtensor)
8689{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008690 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8691 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008692}
8693
8694LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8695 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008697{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008698 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008699}
8700
8701LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8702 armnn::IWorkloadFactory& workloadFactory,
8703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8704{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008705 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008706}
8707
8708LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8709 armnn::IWorkloadFactory& workloadFactory,
8710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8711{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008712 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008713}
8714
8715LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8716 armnn::IWorkloadFactory& workloadFactory,
8717 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8718{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008719 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8720 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008721}
8722
8723LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8724 armnn::IWorkloadFactory& workloadFactory,
8725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8726{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008727 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8728 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008729}
8730
8731LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8732 armnn::IWorkloadFactory& workloadFactory,
8733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8734{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008735 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8736 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008737}
8738
8739LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8740 armnn::IWorkloadFactory& workloadFactory,
8741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8742{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008743 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8744 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008745}
8746
8747LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8748 armnn::IWorkloadFactory& workloadFactory,
8749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8750 bool useSubtensor)
8751{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008752 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8753 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008754}
8755
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008756LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8757 armnn::IWorkloadFactory& workloadFactory,
8758 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8759 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008760{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008761 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8762 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008763}
8764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008765LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8766 armnn::IWorkloadFactory& workloadFactory,
8767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8768 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008769{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008770 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008771 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008772}
8773
Teresa Charlin0434df62019-06-06 13:40:35 +01008774LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
8775 armnn::IWorkloadFactory& workloadFactory,
8776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8777 bool forceNoPadding)
8778{
8779 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
8780 workloadFactory, memoryManager, forceNoPadding);
8781}
8782
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008783LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8784 armnn::IWorkloadFactory& workloadFactory,
8785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8786 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008787{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008788 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8789 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008790}
8791
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008792LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8793 armnn::IWorkloadFactory& workloadFactory,
8794 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8795 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008796{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008797 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008798 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008799}
8800
Teresa Charlin0434df62019-06-06 13:40:35 +01008801LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
8802 armnn::IWorkloadFactory& workloadFactory,
8803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8804 bool forceNoPadding)
8805{
8806 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
8807 workloadFactory, memoryManager, forceNoPadding);
8808}
8809
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008810LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8811 armnn::IWorkloadFactory& workloadFactory,
8812 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008813 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008814{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008815 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008816}
8817
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008818LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8819 armnn::IWorkloadFactory& workloadFactory,
8820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008821 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008822{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008823 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008824}
8825
Teresa Charlin0434df62019-06-06 13:40:35 +01008826LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
8827 armnn::IWorkloadFactory& workloadFactory,
8828 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8829 const armnn::DataLayout dataLayout)
8830{
8831 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8832}
8833LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8834 armnn::IWorkloadFactory& workloadFactory,
8835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8836{
8837 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8838}
8839
8840LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8841 armnn::IWorkloadFactory& workloadFactory,
8842 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8843{
8844 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8845 workloadFactory, memoryManager, 1.0f, -5);
8846}
8847
8848LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
8849 armnn::IWorkloadFactory& workloadFactory,
8850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8851{
8852 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8853 workloadFactory, memoryManager);
8854}
8855
8856LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8857 armnn::IWorkloadFactory& workloadFactory,
8858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8859{
8860 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8861}
8862
8863LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8864 armnn::IWorkloadFactory& workloadFactory,
8865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8866{
8867 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8868 workloadFactory, memoryManager, 1.0f, -5);
8869}
8870
8871LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
8872 armnn::IWorkloadFactory& workloadFactory,
8873 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8874{
8875 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8876 workloadFactory, memoryManager);
8877}
8878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008879LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8880 armnn::IWorkloadFactory& workloadFactory,
8881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008882 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008883{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008884 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008885}
8886
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008887LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8888 armnn::IWorkloadFactory& workloadFactory,
8889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008890 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008891{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008892 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008893 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008894}
8895
Teresa Charlin0434df62019-06-06 13:40:35 +01008896LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
8897 armnn::IWorkloadFactory& workloadFactory,
8898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8899 const armnn::DataLayout dataLayout)
8900{
8901 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8902 workloadFactory, memoryManager, dataLayout);
8903}
8904
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008905LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8906 armnn::IWorkloadFactory& workloadFactory,
8907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8908 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008909{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008910 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008911 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008912}
8913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8915 armnn::IWorkloadFactory& workloadFactory,
8916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008917{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008918 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008919}
8920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008921LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8922 armnn::IWorkloadFactory& workloadFactory,
8923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008924{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008925 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8926 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008927}
8928
Teresa Charlin0434df62019-06-06 13:40:35 +01008929LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
8930 armnn::IWorkloadFactory& workloadFactory,
8931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8932{
8933 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8934 workloadFactory, memoryManager);
8935}
8936LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8937 armnn::IWorkloadFactory& workloadFactory,
8938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8939{
8940 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8941}
8942
8943LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8944 armnn::IWorkloadFactory& workloadFactory,
8945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8946{
8947 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8948 workloadFactory, memoryManager);
8949}
8950
8951LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
8952 armnn::IWorkloadFactory& workloadFactory,
8953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8954{
8955 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8956 workloadFactory, memoryManager);
8957}
8958
8959LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8960 armnn::IWorkloadFactory& workloadFactory,
8961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8962{
8963 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8964 workloadFactory, memoryManager);
8965}
8966
8967LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
8968 armnn::IWorkloadFactory& workloadFactory,
8969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8970{
8971 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8972 workloadFactory, memoryManager);
8973}
8974
8975LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
8976 armnn::IWorkloadFactory& workloadFactory,
8977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8978{
8979 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
8980 workloadFactory, memoryManager);
8981}
8982
8983LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8984 armnn::IWorkloadFactory& workloadFactory,
8985 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8986{
8987 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8988}
8989
8990LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8991 armnn::IWorkloadFactory& workloadFactory,
8992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8993{
8994 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8995 workloadFactory, memoryManager);
8996}
8997
8998LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
8999 armnn::IWorkloadFactory& workloadFactory,
9000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9001{
9002 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
9003 workloadFactory, memoryManager);
9004}
9005
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009006LayerTestResult<float, 4> SimpleL2Pooling2dTest(
9007 armnn::IWorkloadFactory& workloadFactory,
9008 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009009 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009010{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009011 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009012}
9013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009014LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
9015 armnn::IWorkloadFactory& workloadFactory,
9016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00009017 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00009018{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009019 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00009020}
9021
Teresa Charlin0434df62019-06-06 13:40:35 +01009022LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
9023 armnn::IWorkloadFactory& workloadFactory,
9024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9025 const armnn::DataLayout dataLayout)
9026{
9027 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
9028}
9029
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009030LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
9031 armnn::IWorkloadFactory& workloadFactory,
9032 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009033{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009034 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009035}
9036
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009037LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
9038 armnn::IWorkloadFactory& workloadFactory,
9039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009040{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009041 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009042}
9043
Teresa Charlin0434df62019-06-06 13:40:35 +01009044LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
9045 armnn::IWorkloadFactory& workloadFactory,
9046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9047{
9048 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9049}
9050
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009051LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
9052 armnn::IWorkloadFactory& workloadFactory,
9053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009054{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009055 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009056}
9057
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009058LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
9059 armnn::IWorkloadFactory& workloadFactory,
9060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009061{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009062 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009063}
9064
Teresa Charlin0434df62019-06-06 13:40:35 +01009065LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
9066 armnn::IWorkloadFactory& workloadFactory,
9067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9068{
9069 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9070}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009071LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
9072 armnn::IWorkloadFactory& workloadFactory,
9073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009074{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009075 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009076}
9077
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009078LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
9079 armnn::IWorkloadFactory& workloadFactory,
9080 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009081{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009082 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009083}
9084
Teresa Charlin0434df62019-06-06 13:40:35 +01009085LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
9086 armnn::IWorkloadFactory& workloadFactory,
9087 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9088{
9089 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9090}
9091
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009092LayerTestResult<float, 4> L2Pooling2dSize7Test(
9093 armnn::IWorkloadFactory& workloadFactory,
9094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009095{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009096 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009097}
9098
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009099LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
9100 armnn::IWorkloadFactory& workloadFactory,
9101 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009102{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009103 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009104}
9105
Teresa Charlin0434df62019-06-06 13:40:35 +01009106LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
9107 armnn::IWorkloadFactory& workloadFactory,
9108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9109{
9110 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9111}
9112
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009113LayerTestResult<float, 4> L2Pooling2dSize9Test(
9114 armnn::IWorkloadFactory& workloadFactory,
9115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009116{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009117 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009118}
9119
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009120LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
9121 armnn::IWorkloadFactory& workloadFactory,
9122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009123{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009124 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009125}
9126
Teresa Charlin0434df62019-06-06 13:40:35 +01009127LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
9128 armnn::IWorkloadFactory& workloadFactory,
9129 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9130{
9131 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9132}
9133LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
9134 armnn::IWorkloadFactory& workloadFactory,
9135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9136{
9137 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9138}
9139
9140LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
9141 armnn::IWorkloadFactory& workloadFactory,
9142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9143{
9144 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9145}
9146
9147LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
9148 armnn::IWorkloadFactory& workloadFactory,
9149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9150{
9151 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9152}
9153
9154LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
9155 armnn::IWorkloadFactory& workloadFactory,
9156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9157{
9158 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
9159}
9160
9161LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
9162 armnn::IWorkloadFactory& workloadFactory,
9163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9164{
9165 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9166}
9167
9168LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
9169 armnn::IWorkloadFactory& workloadFactory,
9170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9171{
9172 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9173}
9174
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009175LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
9176 armnn::IWorkloadFactory& workloadFactory,
9177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009178{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009179 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009180}
9181
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009182LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
9183 armnn::IWorkloadFactory& workloadFactory,
9184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00009185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009186 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00009187}
9188
Teresa Charlin0434df62019-06-06 13:40:35 +01009189LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
9190 armnn::IWorkloadFactory& workloadFactory,
9191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9192{
9193 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9194}
9195
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009196LayerTestResult<float, 4> ComparePooling2dTest(
9197 armnn::IWorkloadFactory& workloadFactory,
9198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9199 armnn::IWorkloadFactory& refWorkloadFactory,
9200 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009201{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009202 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009203 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00009204}
9205
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009206LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
9207 armnn::IWorkloadFactory& workloadFactory,
9208 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9209 armnn::IWorkloadFactory& refWorkloadFactory,
9210 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00009211{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009212 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009213 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00009214}
9215
Teresa Charlin0434df62019-06-06 13:40:35 +01009216LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
9217 armnn::IWorkloadFactory& workloadFactory,
9218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9219 armnn::IWorkloadFactory& refWorkloadFactory,
9220 armnn::PoolingAlgorithm poolingType)
9221{
9222 return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
9223 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
9224}
9225
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009226LayerTestResult<float, 2> FullyConnectedLargeTest(
9227 armnn::IWorkloadFactory& workloadFactory,
9228 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9229 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00009230{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009231 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00009232}
9233
// Chains two workloads end-to-end: a 1x1 MaxPool with 2x2 stride over a 3x3
// input, whose output tensor handle is then wired directly into an Addition
// workload as its first input. Checks the pooled corner values (1,3,7,9)
// plus the second addend produce the expected sums (13,19,31,37).
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    // NCHW shapes: one batch, one channel.
    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer shaped like the pooling output; used below for a
    // round-trip copy through the pooling output handle.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // Note: the pooling output handle is reused directly as the first
    // addition input — that reuse is the point of this test.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads the pooling output buffer before the pooling
    // workload has executed and then writes the same bytes straight back, so
    // resultMaxPool only ever holds whatever was in the freshly allocated
    // buffer. The pooling workload below overwrites the handle's contents
    // before the addition consumes it — this round-trip looks redundant;
    // confirm whether some backend relies on it before removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run the MaxPool first so its output is in place, then the Addition.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009338
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009339LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9340 armnn::IWorkloadFactory& workloadFactory,
9341 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009342{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009343 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009344}
9345
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009346LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9347 armnn::IWorkloadFactory& workloadFactory,
9348 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009349{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009350 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009351}
9352
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009353LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9354 armnn::IWorkloadFactory& workloadFactory,
9355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009356{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009357 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009358}
9359
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009360LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9361 armnn::IWorkloadFactory& workloadFactory,
9362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009363{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009364 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009365}
9366
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009367LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9368 armnn::IWorkloadFactory& workloadFactory,
9369 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009370{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009371 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009372}
9373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009374LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9375 armnn::IWorkloadFactory& workloadFactory,
9376 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009377{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009378 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009379}
9380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009381LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9382 armnn::IWorkloadFactory& workloadFactory,
9383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009384{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009385 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009386}
9387
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009388LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9389 armnn::IWorkloadFactory& workloadFactory,
9390 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009391{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009392 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009393}
9394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009395LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9396 armnn::IWorkloadFactory& workloadFactory,
9397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009398{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009399 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009400}
9401
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009402LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9403 armnn::IWorkloadFactory& workloadFactory,
9404 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009405{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009406 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009407}
9408
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009409LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9410 armnn::IWorkloadFactory& workloadFactory,
9411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009412{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009413 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009414}
9415
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009416LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9417 armnn::IWorkloadFactory& workloadFactory,
9418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009419{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009420 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009421}
9422
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009423LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9424 armnn::IWorkloadFactory& workloadFactory,
9425 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009426{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009427 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009428}
9429
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009430LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9431 armnn::IWorkloadFactory& workloadFactory,
9432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009433{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009434 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009435}
9436
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009437LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9438 armnn::IWorkloadFactory& workloadFactory,
9439 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009441 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009442}
9443
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009444LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9445 armnn::IWorkloadFactory& workloadFactory,
9446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009447{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009448 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009449}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009450
nikraj01120522a2019-05-31 11:33:07 +01009451LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
9452 armnn::IWorkloadFactory& workloadFactory,
9453 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9454{
9455 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9456}
9457
9458LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
9459 armnn::IWorkloadFactory& workloadFactory,
9460 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9461{
9462 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9463}
9464
9465LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
9466 armnn::IWorkloadFactory& workloadFactory,
9467 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9468{
9469 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9470}
9471
9472LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
9473 armnn::IWorkloadFactory& workloadFactory,
9474 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9475{
9476 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9477}
9478
9479LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
9480 armnn::IWorkloadFactory& workloadFactory,
9481 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9482{
9483 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9484}
9485
9486LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
9487 armnn::IWorkloadFactory& workloadFactory,
9488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9489{
9490 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9491}
9492
9493LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
9494 armnn::IWorkloadFactory& workloadFactory,
9495 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9496{
9497 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9498}
9499
9500LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
9501 armnn::IWorkloadFactory& workloadFactory,
9502 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9503{
9504 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9505}
9506
Keith Davisa57eccb2019-06-14 17:33:22 +01009507LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
9508 armnn::IWorkloadFactory& workloadFactory,
9509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9510{
James Conroyd2aa85e2019-07-01 17:12:40 +01009511 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009512 workloadFactory,
9513 memoryManager);
9514}
9515
9516LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
9517 armnn::IWorkloadFactory& workloadFactory,
9518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9519{
James Conroyd2aa85e2019-07-01 17:12:40 +01009520 return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009521 workloadFactory,
9522 memoryManager,
9523 armnn::DataLayout::NCHW);
9524}
9525
James Conroyd2aa85e2019-07-01 17:12:40 +01009526LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009527 armnn::IWorkloadFactory& workloadFactory,
9528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9529{
James Conroyd2aa85e2019-07-01 17:12:40 +01009530 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009531 workloadFactory,
9532 memoryManager);
9533}
9534
James Conroyd2aa85e2019-07-01 17:12:40 +01009535LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
Keith Davisa57eccb2019-06-14 17:33:22 +01009536 armnn::IWorkloadFactory& workloadFactory,
9537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9538{
James Conroyd2aa85e2019-07-01 17:12:40 +01009539 return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
9540 workloadFactory,
9541 memoryManager,
9542 armnn::DataLayout::NCHW);
9543}
9544
9545LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
9546 armnn::IWorkloadFactory& workloadFactory,
9547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9548{
9549 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9550 workloadFactory,
9551 memoryManager);
9552}
9553
9554LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
9555 armnn::IWorkloadFactory& workloadFactory,
9556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9557{
9558 return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
9559 workloadFactory,
9560 memoryManager,
9561 armnn::DataLayout::NCHW);
9562}
9563
9564LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
9565 armnn::IWorkloadFactory& workloadFactory,
9566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9567{
9568 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
9569 workloadFactory,
9570 memoryManager);
9571}
9572
9573LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
9574 armnn::IWorkloadFactory& workloadFactory,
9575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9576{
9577 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009578 workloadFactory,
9579 memoryManager,
9580 armnn::DataLayout::NCHW);
9581}
9582
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009583namespace {
9584
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009585} // anonymous namespace
9586
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009587LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9588 armnn::IWorkloadFactory& workloadFactory,
9589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9590{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009591 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009592}
9593
9594LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9595 armnn::IWorkloadFactory& workloadFactory,
9596 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9597{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009598 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009599}
9600
9601LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9602 armnn::IWorkloadFactory& workloadFactory,
9603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9604{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009605 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009606}
9607
9608LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9609 armnn::IWorkloadFactory& workloadFactory,
9610 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9611{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009612 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009613}
9614
9615LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9616 armnn::IWorkloadFactory& workloadFactory,
9617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9618{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009619 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009620}
9621
9622LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9623 armnn::IWorkloadFactory& workloadFactory,
9624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9625{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009626 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009627}
9628
9629LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9630 armnn::IWorkloadFactory& workloadFactory,
9631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9632{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009633 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009634}
9635
9636LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9637 armnn::IWorkloadFactory& workloadFactory,
9638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9639{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009640 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009641}
9642
9643LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9644 armnn::IWorkloadFactory& workloadFactory,
9645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9646{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009647 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009648}
9649
9650LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9651 armnn::IWorkloadFactory& workloadFactory,
9652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9653{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009654 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009655}
9656
9657LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9658 armnn::IWorkloadFactory& workloadFactory,
9659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9660{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009661 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009662}
9663
9664LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9665 armnn::IWorkloadFactory& workloadFactory,
9666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9667{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009668 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009669}
9670
9671LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9672 armnn::IWorkloadFactory& workloadFactory,
9673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9674{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009675 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009676}
9677
9678LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9679 armnn::IWorkloadFactory& workloadFactory,
9680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9681{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009682 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009683}
9684
9685LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9686 armnn::IWorkloadFactory& workloadFactory,
9687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9688{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009689 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009690}
9691
9692LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9693 armnn::IWorkloadFactory& workloadFactory,
9694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9695{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009696 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009697}
9698
9699LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9700 armnn::IWorkloadFactory& workloadFactory,
9701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9702{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009703 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009704}
9705
9706LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9707 armnn::IWorkloadFactory& workloadFactory,
9708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9709{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009710 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009711}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009712
Matteo Martincigh42666a12019-05-29 08:53:41 +01009713LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9714 armnn::IWorkloadFactory& workloadFactory,
9715 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9716{
9717 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9718}
9719
9720LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9721 armnn::IWorkloadFactory& workloadFactory,
9722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9723{
9724 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9725}
9726
9727LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9728 armnn::IWorkloadFactory& workloadFactory,
9729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9730{
9731 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9732}
9733
9734LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9735 armnn::IWorkloadFactory& workloadFactory,
9736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9737{
9738 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9739}
9740
9741LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9742 armnn::IWorkloadFactory& workloadFactory,
9743 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9744{
9745 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9746}
9747
9748LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9749 armnn::IWorkloadFactory& workloadFactory,
9750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9751{
9752 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9753}
9754
9755LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9756 armnn::IWorkloadFactory& workloadFactory,
9757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9758{
9759 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9760}
9761
9762LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9763 armnn::IWorkloadFactory& workloadFactory,
9764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9765{
9766 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9767}
9768
9769LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9770 armnn::IWorkloadFactory& workloadFactory,
9771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9772{
9773 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9774}
9775
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009776LayerTestResult<float, 4> Debug4DFloat32Test(
9777 armnn::IWorkloadFactory& workloadFactory,
9778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9779{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009780 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009781}
9782
9783LayerTestResult<float, 3> Debug3DFloat32Test(
9784 armnn::IWorkloadFactory& workloadFactory,
9785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9786{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009787 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009788}
9789
9790LayerTestResult<float, 2> Debug2DFloat32Test(
9791 armnn::IWorkloadFactory& workloadFactory,
9792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9793{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009794 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009795}
9796
9797LayerTestResult<float, 1> Debug1DFloat32Test(
9798 armnn::IWorkloadFactory& workloadFactory,
9799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9800{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009801 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009802}
9803
9804LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9805 armnn::IWorkloadFactory& workloadFactory,
9806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9807{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009808 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009809}
9810
9811LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9812 armnn::IWorkloadFactory& workloadFactory,
9813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9814{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009815 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009816}
9817
9818LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9819 armnn::IWorkloadFactory& workloadFactory,
9820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9821{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009822 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009823}
9824
9825LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9826 armnn::IWorkloadFactory& workloadFactory,
9827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9828{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009829 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009830}
Matteo Martincigh49124022019-01-11 13:25:59 +00009831
narpra014951d842019-01-18 16:53:53 +00009832LayerTestResult<float, 1> Gather1DParamsFloatTest(
9833 armnn::IWorkloadFactory& workloadFactory,
9834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9835{
9836 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9837}
9838
9839LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9840 armnn::IWorkloadFactory& workloadFactory,
9841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9842{
9843 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9844}
9845
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009846LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
9847 armnn::IWorkloadFactory& workloadFactory,
9848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9849{
9850 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9851}
9852
narpra014951d842019-01-18 16:53:53 +00009853LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9854 armnn::IWorkloadFactory& workloadFactory,
9855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9856{
9857 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9858}
9859
9860LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9861 armnn::IWorkloadFactory& workloadFactory,
9862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9863{
9864 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9865}
9866
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009867LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
9868 armnn::IWorkloadFactory& workloadFactory,
9869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9870{
9871 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9872}
9873
narpra014951d842019-01-18 16:53:53 +00009874LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9875 armnn::IWorkloadFactory& workloadFactory,
9876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9877{
9878 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9879}
9880
9881LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9882 armnn::IWorkloadFactory& workloadFactory,
9883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9884{
9885 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9886 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009887}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009888
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009889LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
9890 armnn::IWorkloadFactory& workloadFactory,
9891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9892{
9893 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
9894 workloadFactory, memoryManager);
9895}
9896
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009897LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009898 armnn::IWorkloadFactory& workloadFactory,
9899 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9900{
9901 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9902}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009903
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009904LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9905 armnn::IWorkloadFactory& workloadFactory,
9906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9907{
9908 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9909}
9910
9911LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9912 armnn::IWorkloadFactory& workloadFactory,
9913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9914{
9915 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9916}
9917
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009918LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9919 armnn::IWorkloadFactory& workloadFactory,
9920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9921{
9922 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9923}
9924
9925LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9926 armnn::IWorkloadFactory& workloadFactory,
9927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9928{
9929 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9930}
9931
9932LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9933 armnn::IWorkloadFactory& workloadFactory,
9934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9935{
9936 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9937}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01009938
9939//
9940// TransposeConvolution2d
9941//
9942
9943// Simple biased
9944LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
9945 armnn::IWorkloadFactory& workloadFactory,
9946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9947{
9948 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9949 workloadFactory,
9950 memoryManager,
9951 true,
9952 armnn::DataLayout::NCHW);
9953}
9954
9955LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
9956 armnn::IWorkloadFactory& workloadFactory,
9957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9958{
9959 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9960 workloadFactory,
9961 memoryManager,
9962 true,
9963 armnn::DataLayout::NHWC);
9964}
9965
9966LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
9967 armnn::IWorkloadFactory& workloadFactory,
9968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9969{
9970 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9971 workloadFactory,
9972 memoryManager,
9973 true,
9974 armnn::DataLayout::NCHW);
9975}
9976
9977LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
9978 armnn::IWorkloadFactory& workloadFactory,
9979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9980{
9981 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9982 workloadFactory,
9983 memoryManager,
9984 true,
9985 armnn::DataLayout::NHWC);
9986}
9987
9988LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
9989 armnn::IWorkloadFactory& workloadFactory,
9990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9991{
9992 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9993 workloadFactory,
9994 memoryManager,
9995 true,
9996 armnn::DataLayout::NCHW);
9997}
9998
9999LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
10000 armnn::IWorkloadFactory& workloadFactory,
10001 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10002{
10003 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10004 workloadFactory,
10005 memoryManager,
10006 true,
10007 armnn::DataLayout::NHWC);
10008}
10009
10010// Simple unbiased
10011LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
10012 armnn::IWorkloadFactory& workloadFactory,
10013 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10014{
10015 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10016 workloadFactory,
10017 memoryManager,
10018 false,
10019 armnn::DataLayout::NCHW);
10020}
10021
10022LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
10023 armnn::IWorkloadFactory& workloadFactory,
10024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10025{
10026 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10027 workloadFactory,
10028 memoryManager,
10029 false,
10030 armnn::DataLayout::NHWC);
10031}
10032
10033LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
10034 armnn::IWorkloadFactory& workloadFactory,
10035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10036{
10037 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10038 workloadFactory,
10039 memoryManager,
10040 false,
10041 armnn::DataLayout::NCHW);
10042}
10043
10044LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
10045 armnn::IWorkloadFactory& workloadFactory,
10046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10047{
10048 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10049 workloadFactory,
10050 memoryManager,
10051 false,
10052 armnn::DataLayout::NHWC);
10053}
10054
10055LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
10056 armnn::IWorkloadFactory& workloadFactory,
10057 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10058{
10059 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10060 workloadFactory,
10061 memoryManager,
10062 false,
10063 armnn::DataLayout::NCHW);
10064}
10065
10066LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
10067 armnn::IWorkloadFactory& workloadFactory,
10068 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10069{
10070 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10071 workloadFactory,
10072 memoryManager,
10073 false,
10074 armnn::DataLayout::NHWC);
10075}
10076
10077// Padded biased
10078LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
10079 armnn::IWorkloadFactory& workloadFactory,
10080 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10081{
10082 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10083 workloadFactory,
10084 memoryManager,
10085 true,
10086 armnn::DataLayout::NCHW);
10087}
10088
10089LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10090 armnn::IWorkloadFactory& workloadFactory,
10091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10092{
10093 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10094 workloadFactory,
10095 memoryManager,
10096 true,
10097 armnn::DataLayout::NHWC);
10098}
10099
10100LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10101 armnn::IWorkloadFactory& workloadFactory,
10102 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10103{
10104 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10105 workloadFactory,
10106 memoryManager,
10107 true,
10108 armnn::DataLayout::NCHW);
10109}
10110
10111LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10112 armnn::IWorkloadFactory& workloadFactory,
10113 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10114{
10115 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10116 workloadFactory,
10117 memoryManager,
10118 true,
10119 armnn::DataLayout::NHWC);
10120}
10121
10122LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10123 armnn::IWorkloadFactory& workloadFactory,
10124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10125{
10126 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10127 workloadFactory,
10128 memoryManager,
10129 true,
10130 armnn::DataLayout::NCHW);
10131}
10132
10133LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10134 armnn::IWorkloadFactory& workloadFactory,
10135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10136{
10137 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10138 workloadFactory,
10139 memoryManager,
10140 true,
10141 armnn::DataLayout::NHWC);
10142}
10143
10144// Padded unbiased
10145LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10146 armnn::IWorkloadFactory& workloadFactory,
10147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10148{
10149 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10150 workloadFactory,
10151 memoryManager,
10152 false,
10153 armnn::DataLayout::NCHW);
10154}
10155
10156LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10157 armnn::IWorkloadFactory& workloadFactory,
10158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10159{
10160 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10161 workloadFactory,
10162 memoryManager,
10163 false,
10164 armnn::DataLayout::NHWC);
10165}
10166
10167LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10168 armnn::IWorkloadFactory& workloadFactory,
10169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10170{
10171 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10172 workloadFactory,
10173 memoryManager,
10174 false,
10175 armnn::DataLayout::NCHW);
10176}
10177
10178LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10179 armnn::IWorkloadFactory& workloadFactory,
10180 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10181{
10182 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10183 workloadFactory,
10184 memoryManager,
10185 false,
10186 armnn::DataLayout::NHWC);
10187}
10188
10189LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10190 armnn::IWorkloadFactory& workloadFactory,
10191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10192{
10193 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10194 workloadFactory,
10195 memoryManager,
10196 false,
10197 armnn::DataLayout::NCHW);
10198}
10199
10200LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10201 armnn::IWorkloadFactory& workloadFactory,
10202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10203{
10204 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10205 workloadFactory,
10206 memoryManager,
10207 false,
10208 armnn::DataLayout::NHWC);
10209}
10210
10211// Strided biased
10212LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10213 armnn::IWorkloadFactory& workloadFactory,
10214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10215{
10216 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10217 workloadFactory,
10218 memoryManager,
10219 true,
10220 armnn::DataLayout::NCHW);
10221}
10222
10223LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10224 armnn::IWorkloadFactory& workloadFactory,
10225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10226{
10227 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10228 workloadFactory,
10229 memoryManager,
10230 true,
10231 armnn::DataLayout::NHWC);
10232}
10233
10234LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10235 armnn::IWorkloadFactory& workloadFactory,
10236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10237{
10238 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10239 workloadFactory,
10240 memoryManager,
10241 true,
10242 armnn::DataLayout::NCHW);
10243}
10244
10245LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10246 armnn::IWorkloadFactory& workloadFactory,
10247 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10248{
10249 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10250 workloadFactory,
10251 memoryManager,
10252 true,
10253 armnn::DataLayout::NHWC);
10254}
10255
10256LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10257 armnn::IWorkloadFactory& workloadFactory,
10258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10259{
10260 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10261 workloadFactory,
10262 memoryManager,
10263 true,
10264 armnn::DataLayout::NCHW);
10265}
10266
10267LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10268 armnn::IWorkloadFactory& workloadFactory,
10269 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10270{
10271 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10272 workloadFactory,
10273 memoryManager,
10274 true,
10275 armnn::DataLayout::NHWC);
10276}
10277
10278// Strided unbiased
10279LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10280 armnn::IWorkloadFactory& workloadFactory,
10281 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10282{
10283 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10284 workloadFactory,
10285 memoryManager,
10286 false,
10287 armnn::DataLayout::NCHW);
10288}
10289
10290LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10291 armnn::IWorkloadFactory& workloadFactory,
10292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10293{
10294 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10295 workloadFactory,
10296 memoryManager,
10297 false,
10298 armnn::DataLayout::NHWC);
10299}
10300
10301LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10302 armnn::IWorkloadFactory& workloadFactory,
10303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10304{
10305 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10306 workloadFactory,
10307 memoryManager,
10308 false,
10309 armnn::DataLayout::NCHW);
10310}
10311
10312LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10313 armnn::IWorkloadFactory& workloadFactory,
10314 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10315{
10316 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10317 workloadFactory,
10318 memoryManager,
10319 false,
10320 armnn::DataLayout::NHWC);
10321}
10322
10323LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10324 armnn::IWorkloadFactory& workloadFactory,
10325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10326{
10327 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10328 workloadFactory,
10329 memoryManager,
10330 false,
10331 armnn::DataLayout::NCHW);
10332}
10333
10334LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10335 armnn::IWorkloadFactory& workloadFactory,
10336 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10337{
10338 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10339 workloadFactory,
10340 memoryManager,
10341 false,
10342 armnn::DataLayout::NHWC);
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +010010343}