blob: b8f0541ee2f1c1b97fb8002cb678425faae3e007 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000033#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000034#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000035#include "SpaceToBatchNdTestImpl.hpp"
Keith Davisa57eccb2019-06-14 17:33:22 +010036#include "SpaceToDepthTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010041#include "LstmTestImpl.hpp"
42#include "ConvertFp16ToFp32TestImpl.hpp"
43#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000044#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000045#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010046#include "QuantizeTestImpl.hpp"
Aron Virginas-Tar735a4502019-06-26 15:02:47 +010047#include "TransposeConvolution2dTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Laid out channel-major (NCHW without the batch dim): 8 rows of 16 values per
// channel. Channel 0 is mostly 0.5 with one zero row, channel 1 has a single
// vertical line of 1s, channel 2 is all -1s.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests. Quantised (or passed through
// unmodified for float) by GetBias2 before use.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Runs a Conv2d over the shared 1x3x8x16 input (N,C,H,W here) with a 2-element
// batch of 3-channel 3x5 kernels and compares against precomputed reference
// output. qScale/qOffset quantise input, kernel and expected output; Bias2 is
// applied only when biasEnabled is true. `layout` selects the data layout the
// workload actually runs in (the impl permutes as needed — TODO confirm against
// SimpleConvolution2dTestImpl).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, channel 1 (all zero — ignores the vertical-line channel).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, channel 0 (zero — ignores the 0.5 channel).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2 (zero — ignores the -1 channel).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Same scenario as SimpleConvolution2d3x5TestCommon but with 3x3 kernels,
// chosen to exercise ArmCompute's direct convolution path. Shared 1x3x8x16
// input; two 3-channel 3x3 kernels; precomputed 1x2x6x14 expected output.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0: responds to channels 0 and 2 of the input.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1: responds only to channel 1 (the vertical line).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
// Small NHWC convolution check: a single 3x4 single-channel image convolved
// with one 3x3 kernel (same-size output, so the impl presumably applies
// padding — TODO confirm against SimpleConvolution2dNhwcTestImpl defaults).
// NOTE(review): biasEnabled is currently ignored — an empty bias tensor is
// always passed to the impl.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch 3x4 1-channel image in NHWC order {N=1, H=3, W=4, C=1}.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3,
                                                          8, 7, 3, 6,
                                                          3, 3, 9, 1
                                                      });


    // One single-channel 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                       4, 5, 6,
                                                                       0, 0, 0,
                                                                       3, 2, 1
                                                                   });

    // Expected output is 1 batch of a 3x4 1-channel image.
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias (see NOTE above).
        expectedOutput,
        dataLayout,
        qScale,
        qOffset);
}
308
// NHWC convolution with explicit 1-pixel padding on every side and 2x2 stride:
// 5x5 single-channel input, 3x3 kernel, 3x3 expected output.
// NOTE(review): biasEnabled is currently ignored — an empty bias tensor is
// always passed to the impl.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image (NHWC: {1, 5, 5, 1}).
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                          1, 5, 2, 3, 5,
                                                          8, 7, 3, 6, 3,
                                                          3, 3, 9, 1, 9,
                                                          4, 1, 8, 1, 3,
                                                          6, 8, 1, 9, 2
                                                      });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
                                                       {
                                                           4, 5, 6,
                                                           0, 0, 0,
                                                           3, 2, 1
                                                       });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric 1-pixel padding with a 2x2 stride halves each spatial dim.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias (see NOTE above).
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
// Convolution where the asymmetric padding (1 left, 2 top, 3 right, 4 bottom)
// exceeds half the 2x2 kernel size in some directions — a case some backends
// special-case. 3x3 input, 2x2 kernel, precomputed 8x6 expected output. Bias
// is always disabled here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // No bias for this test; the scale argument is still input*kernel scale.
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
Teresa Charlinedeeb162019-06-14 11:09:19 +0100586LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
587 armnn::IWorkloadFactory& workloadFactory,
588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
589 armnn::DataLayout layout)
590{
591 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
592 workloadFactory, memoryManager, layout, 0.0f, 0);
593}
594
595LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
596 armnn::IWorkloadFactory& workloadFactory,
597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
598 armnn::DataLayout layout)
599{
600 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
601 <armnn::DataType::Float32, armnn::DataType::Float32>(
602 workloadFactory, memoryManager, layout, 0.0f, 0);
603}
604
605LayerTestResult<float, 4> Convolution1dTest(
606 armnn::IWorkloadFactory& workloadFactory,
607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
608 bool biasEnabled)
609{
610 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
611 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
612}
613
614LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
615 armnn::IWorkloadFactory& workloadFactory,
616 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
617 bool biasEnabled)
618{
619 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
620 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
621}
622
623LayerTestResult<float,4> CompareConvolution2dTest(
624 armnn::IWorkloadFactory& workloadFactory,
625 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
626 armnn::IWorkloadFactory& refWorkloadFactory)
627{
628 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
629 workloadFactory, memoryManager, refWorkloadFactory);
630}
631
// Shared driver for the dilated-convolution tests: takes unquantised input,
// kernel and expected-output data plus their TensorInfos, picks quantisation
// parameters from the compile-time ArmnnType, stamps them onto the infos
// (passed by non-const ref — the caller's infos are mutated), quantises the
// data and forwards everything to SimpleConvolution2dTestImpl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX  = 1,
    uint32_t strideY  = 1,
    bool biasEnabled = false
)
{
    // Fixed per-data-type quantisation parameters; the switch is resolved at
    // compile time since ArmnnType is a template parameter.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QuantisedAsymm8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QuantisedSymm16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Mutates the caller-supplied TensorInfos so they match the data below.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    auto input = MakeTensor<T, 4>(inputTensorInfo,
                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
                                                                    inputTensorInfo.GetQuantizationOffset(),
                                                                    inputNoQuantizedValues)));
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
                                                                     kernelTensorInfo.GetQuantizationOffset(),
                                                                     kernelNoQuantizedValues)));
    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                                                                             outputTensorInfo.GetQuantizationOffset(),
                                                                             outputExpectedNoQuantizedValues)));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // Bias scale is the product of input and kernel scales (both qScale here).
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
718
// Tests Convolution2d with a 3x3 kernel dilated 3x3 (effective kernel 7x7) over a
// single-batch, single-channel 10x10 input, no padding, stride 1.
// ArmnnType selects the tensor data type, ArmnnBType the bias data type;
// quantisation parameters are chosen inside Convolution2d3x3DilationTestCommon.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // A 3x3 block of ones centred in a zero background; with dilated taps 3 apart,
    // at most a few kernel weights ever overlap the ones, keeping expected values small.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Kernel weights 1..9 laid out row-major ([O, I, H, W] = [1, 1, 3, 3]).
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I - K' + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,      // dilationX
        3,      // dilationY
        layout,
        biasEnabled);
}
774
// Two-input-channel variant of the 3x3-kernel / 3x3-dilation convolution test.
// The kernel has output-channel dimension 1 ([O, I, H, W] = [1, 2, 3, 3]), so both input
// channels are summed into a single output channel; with identical data and weights per
// channel, the expected values are exactly double those of the single-channel test.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Both channels carry the same 3x3 block of ones centred in zeros.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // The same 1..9 weights are repeated for each input channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I - K' + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    // One output channel only, because the kernel's output-channel dimension is 1.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,      // dilationX
        3,      // dilationY
        layout,
        biasEnabled);
}
845
// Tests Convolution2d combining 2x2 dilation with padding (1 per side) and a 3x3 stride,
// exercising the padded/strided path of Convolution2d3x3DilationTestCommon.
// NOTE(review): the name says "Padding2x2" but the code pads 1 on each side (total 2 per
// dimension) — presumably the name refers to the total padding; confirm against callers.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // All-ones input: each output value is simply the sum of the kernel weights that
    // land inside the (padded) image at that position.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Since the dilation rate is 2 the kernel dilates to an effective 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
    // therefore the output will be 4x4: (I - K' + padLeft + padRight)/S + 1 => (10 - 3 + 1 + 1)/3 + 1 = 4
    // where dilation d = 2; kernel K = 2 (dilated K' = 3); input I = 10; padding = 1 per side; stride S = 3.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4, 7, 7, 3,       // top row: weight 1's tap falls in the padding
        6, 10, 10, 4,     // interior: full kernel sum 1+2+3+4 = 10
        6, 10, 10, 4,
        2, 3, 3, 1        // bottom row: only the taps still inside the image contribute
    };
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        2,      // dilationX
        2,      // dilationY
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        3,      // strideX
        3,      // strideY
        biasEnabled
    );
}
912
//
// Explicit template instantiations of the dilated-convolution tests for every supported
// combination of tensor data type and bias data type (Float32 bias for Float32 tensors,
// Signed32 bias for the quantised types), so the template definitions can stay in this file.
//
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
954
Teresa Charlin2b7519d2019-07-09 15:45:35 +0100955template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
956Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
957 armnn::IWorkloadFactory &workloadFactory,
958 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
959 bool biasEnabled,
960 const armnn::DataLayout layout);
961
962template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
963Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
964 armnn::IWorkloadFactory &workloadFactory,
965 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
966 bool biasEnabled,
967 const armnn::DataLayout layout);
968
969template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
970Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
971 armnn::IWorkloadFactory &workloadFactory,
972 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
973 bool biasEnabled,
974 const armnn::DataLayout layout);
975
// Tests DepthwiseConvolution2d with asymmetric padding (left/top = 1, right/bottom = 2)
// on a 2-channel 5x5 input with a per-channel 4x4 kernel (depth multiplier 1).
// qScale/qOffset are applied to input, kernel and output alike via QuantizedVector.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
        typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input; values are the ramp 0..49.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel; weights are the ramp 32..1.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),   // bias scale = inputScale * kernelScale
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1056
// NHWC-layout variant of the asymmetric-padding depthwise convolution test: same input,
// kernel, padding and expected output as DepthwiseConvolution2dAsymmetricTestCommon, but
// run through DepthwiseConvolution2dTestImpl with layout fixed to NHWC.
// NOTE(review): tensor shapes are written {1, 2, 5, 5} as in the NCHW test — presumably
// DepthwiseConvolution2dTestImpl permutes the data for NHWC; confirm in the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
        typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    // 2-channel 5x5 input carrying the ramp 0..49.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Per-channel 4x4 kernel (depth multiplier 1), weights 32 down to 1.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Same expected values as the NCHW asymmetric test.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),   // bias scale = inputScale * kernelScale
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
1134
// Tests a depthwise convolution with 3x3 dilation in NHWC layout: a 9x9 single-channel
// input with a centred 3x3 block of ones, a 3x3 kernel, no padding, unit stride.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
        typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    auto layout = armnn::DataLayout::NHWC;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
        {
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // 3x3 kernel with weights 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        })));

    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;
    uint32_t dilationX = 3;
    uint32_t dilationY = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    // (Each output position sees exactly one '1' of the block under the centre weight 5.)
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
        {
            5, 5, 5,
            5, 5, 5,
            5, 5, 5
        })));

    return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),   // bias scale = inputScale * kernelScale
        expectedOutput,
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1208
Teresa Charlin20b1f882019-06-19 09:34:37 +01001209
1210template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1211LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
1212 armnn::IWorkloadFactory& workloadFactory,
1213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1214 const std::vector<float>& inputNoQuantizedValues,
1215 armnn::TensorInfo& inputTensorInfo,
1216 const std::vector<float>& kernelNoQuantizedValues,
1217 armnn::TensorInfo& kernelTensorInfo,
1218 const std::vector<float>& outputExpectedNoQuantizedValues,
1219 armnn::TensorInfo& outputTensorInfo,
1220 uint32_t dilationX,
1221 uint32_t dilationY,
1222 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1223 bool biasEnabled = false)
1224{
1225 float qScale;
1226 int32_t qOffset;
1227 switch (ArmnnType)
1228 {
1229 case armnn::DataType::QuantisedAsymm8:
1230 {
1231 qScale = 0.1f;
1232 qOffset = 128;
1233 break;
1234 }
1235 case armnn::DataType::QuantisedSymm16:
1236 {
1237 qScale = 0.1f;
1238 qOffset = 0;
1239 break;
1240 }
1241 case armnn::DataType::Float32:
1242 default:
1243 {
1244 qScale = 0.f;
1245 qOffset = 0;
1246 break;
1247 }
1248 }
1249
1250 inputTensorInfo.SetQuantizationScale(qScale);
1251 inputTensorInfo.SetQuantizationOffset(qOffset);
1252 kernelTensorInfo.SetQuantizationScale(qScale);
1253 kernelTensorInfo.SetQuantizationOffset(qOffset);
1254 outputTensorInfo.SetQuantizationScale(qScale);
1255 outputTensorInfo.SetQuantizationOffset(qOffset);
1256
1257 auto input = MakeTensor<T, 4>(inputTensorInfo,
1258 std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
1259 inputTensorInfo.GetQuantizationOffset(),
1260 inputNoQuantizedValues)));
1261 auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
1262 std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
1263 kernelTensorInfo.GetQuantizationOffset(),
1264 kernelNoQuantizedValues)));
1265 auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
1266 std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
1267 outputTensorInfo.GetQuantizationOffset(),
1268 outputExpectedNoQuantizedValues)));
1269
1270 uint32_t padLeft = 0;
1271 uint32_t padTop = 0;
1272 uint32_t padRight = 0;
1273 uint32_t padBottom = 0;
1274 uint32_t strideX = 1;
1275 uint32_t strideY = 1;
1276
1277 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1278 workloadFactory,
1279 memoryManager,
1280 input,
1281 kernel,
1282 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1283 expectedOutput,
1284 qScale,
1285 qOffset,
1286 layout,
1287 padLeft,
1288 padTop,
1289 padRight,
1290 padBottom,
1291 strideX,
1292 strideY,
1293 dilationX,
1294 dilationY);
1295}
1296
// Depthwise counterpart of Convolution2d3x3Dilation3x3Test: 3x3 kernel dilated 3x3
// (effective 7x7) over a single-channel 10x10 input, no padding, stride 1.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 3x3 block of ones centred in a zero background.
    armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Kernel weights 1..9 row-major.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I - K' + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,      // dilationX
        3,      // dilationY
        layout,
        biasEnabled);
}
1352
// Two-channel variant of the dilated depthwise test. Unlike the regular convolution
// version, depthwise keeps one output plane per input channel (output {1, 2, 4, 4}),
// so each plane matches the single-channel expected values rather than their sum.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Both channels carry the same 3x3 block of ones centred in zeros.
    armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // The same 1..9 weights are repeated for each channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 2x4x4: (I - K' + 2P)/S + 1 => (10 - 7 + 0)/1 + 1 = 4.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.,

        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3,      // dilationX
        3,      // dilationY
        layout,
        biasEnabled);
}
1428
1429
//
// Explicit template instantiations of the dilated depthwise-convolution tests for every
// supported tensor/bias data type combination (Float32 bias for Float32 tensors,
// Signed32 bias for the quantised types).
//
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    bool,
    armnn::DataLayout);
1471
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001472LayerTestResult<float, 4> DepthwiseConvolution2dTest(
1473 armnn::IWorkloadFactory& workloadFactory,
1474 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1475 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001476 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001477{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001478 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001479 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001480}
1481
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001482LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
1483 armnn::IWorkloadFactory& workloadFactory,
1484 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1485 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +01001486{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001487 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
1488 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +01001489}
1490
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001491LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
1492 armnn::IWorkloadFactory& workloadFactory,
1493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1494 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001495 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001496{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001497 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001498 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001499}
1500
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001501LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
1502 armnn::IWorkloadFactory& workloadFactory,
1503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1504 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001505 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +01001506{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001507 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001508 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +01001509}
1510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001511LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
1512 armnn::IWorkloadFactory& workloadFactory,
1513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1514 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001515 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001516{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001517 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001518 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001519}
1520
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001521LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
1522 armnn::IWorkloadFactory& workloadFactory,
1523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1524 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +00001525 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001526{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001527 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001528 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +00001529}
1530
Bruno Goncalves22972f02019-04-26 21:03:24 -03001531LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
1532 armnn::IWorkloadFactory& workloadFactory,
1533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1534{
1535 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Teresa Charlin20b1f882019-06-19 09:34:37 +01001536 workloadFactory,
1537 memoryManager,
1538 0.f,
1539 0,
1540 false);
Bruno Goncalves22972f02019-04-26 21:03:24 -03001541}
1542
Ruomei Yan88d44b82019-05-23 14:29:06 +01001543LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
1544 armnn::IWorkloadFactory& workloadFactory,
1545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1546 bool biasEnabled,
1547 const armnn::DataLayout layout)
1548{
1549 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1550 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1551}
1552
1553LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
1554 armnn::IWorkloadFactory& workloadFactory,
1555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1556 bool biasEnabled,
1557 const armnn::DataLayout layout)
1558{
1559 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
1560 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
1561}
1562
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001563LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001564 armnn::IWorkloadFactory& workloadFactory,
1565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +00001567 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00001568{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001569 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
1570 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +00001571}
1572
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001573LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
1574 armnn::IWorkloadFactory& workloadFactory,
1575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1576 armnn::IWorkloadFactory& refWorkloadFactory,
1577 const armnn::DataLayout layout)
1578{
1579 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
1580 workloadFactory, memoryManager, refWorkloadFactory, layout);
1581}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001582
1583LayerTestResult<float,4> SimpleNormalizationAcrossTest(
1584 armnn::IWorkloadFactory& workloadFactory,
1585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001586{
1587 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1588 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001589 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001590}
1591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001592LayerTestResult<float,4> SimpleNormalizationWithinTest(
1593 armnn::IWorkloadFactory& workloadFactory,
1594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001595{
1596 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1597 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001598 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001599}
1600
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001601LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1602 armnn::IWorkloadFactory& workloadFactory,
1603 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001604{
1605 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1606 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001607 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001608}
1609
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001610LayerTestResult<float,2> SimpleSoftmaxTest(
1611 armnn::IWorkloadFactory& workloadFactory,
1612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1613 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001614{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001615 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001616}
1617
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001618LayerTestResult<float,3> Simple3dSoftmaxTest(
1619 armnn::IWorkloadFactory& workloadFactory,
1620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1621 float beta)
1622{
1623 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1624}
1625
1626LayerTestResult<float,4> Simple4dSoftmaxTest(
1627 armnn::IWorkloadFactory& workloadFactory,
1628 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1629 float beta)
1630{
1631 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1632}
1633
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001634LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1635 armnn::IWorkloadFactory& workloadFactory,
1636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1637 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001638{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001639 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001640}
1641
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001642LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1643 armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1645 float beta)
1646{
1647 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1648}
1649
1650LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1651 armnn::IWorkloadFactory& workloadFactory,
1652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1653 float beta)
1654{
1655 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1656}
1657
nikraj01248683f2019-05-29 16:46:50 +01001658LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1659 armnn::IWorkloadFactory& workloadFactory,
1660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1661 float beta)
1662{
1663 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1664}
1665
1666LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1667 armnn::IWorkloadFactory& workloadFactory,
1668 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1669 float beta)
1670{
1671 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1672}
1673
1674LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1675 armnn::IWorkloadFactory& workloadFactory,
1676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1677 float beta)
1678{
1679 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1680}
1681
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001682LayerTestResult<float,4> CompareNormalizationTest(
1683 armnn::IWorkloadFactory& workloadFactory,
1684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1685 armnn::IWorkloadFactory& refWorkloadFactory,
1686 armnn::NormalizationAlgorithmChannel normChannel,
1687 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001688{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001689 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001690}
1691
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001692LayerTestResult<float,2> CompareSoftmaxTest(
1693 armnn::IWorkloadFactory& workloadFactory,
1694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001695 armnn::IWorkloadFactory& refWorkloadFactory,
1696 float beta)
1697{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001698 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1699 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001700}
1701
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001702LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1703 armnn::IWorkloadFactory& workloadFactory,
1704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001705 armnn::IWorkloadFactory& refWorkloadFactory,
1706 float beta)
1707{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001708 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1709 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001710}
1711
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001712std::vector<LayerTestResult<float,3>> SplitterTest(
1713 armnn::IWorkloadFactory& workloadFactory,
1714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001715{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001716 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001717}
1718
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001719std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1720 armnn::IWorkloadFactory& workloadFactory,
1721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001722{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001723 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001724}
1725
Ruomei Yan25339c32019-05-28 16:48:20 +01001726std::vector<LayerTestResult<int16_t,3>> SplitterInt16Test(
1727 armnn::IWorkloadFactory& workloadFactory,
1728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1729{
1730 return SplitterTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1731}
1732
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001733LayerTestResult<float, 3> CopyViaSplitterTest(
1734 armnn::IWorkloadFactory& workloadFactory,
1735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001736{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001737 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001738}
1739
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001740LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1741 armnn::IWorkloadFactory& workloadFactory,
1742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001743{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001744 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001745}
1746
Ruomei Yan25339c32019-05-28 16:48:20 +01001747LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
1748 armnn::IWorkloadFactory& workloadFactory,
1749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1750{
1751 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
1752}
1753
Jan Eilers38e05bd2019-06-26 13:10:09 +01001754void LstmUtilsZeroVectorTest()
1755{
1756 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1757 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1758 {2., 3., 3., 4.}));
1759
1760 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1761 {0., 0., 0., 0.}));
1762
1763 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1764}
1765
1766void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
1767{
1768 uint32_t batchSize = 2;
1769 uint32_t vecSize = 4;
1770 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1771 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1772 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
1773 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
1774
1775 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1776 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
1777 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
1778
1779 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1780 vecSize, batchSize, expectedOutput);
1781}
1782
1783void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1784{
1785 uint32_t batchSize = 2;
1786 uint32_t vecSize = 4;
1787 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1788 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1789 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1790 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1791
1792 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1793 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1794 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1795
1796 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1797 vecSize, batchSize, expectedOutput);
1798}
1799
1800void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
1801{
1802 uint32_t batchSize = 2;
1803 uint32_t vecSize = 4;
1804 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1805 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1806 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1807 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
1808
1809 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1810 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1811 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
1812
1813 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1814 vecSize, batchSize, expectedOutput);
1815}
1816
1817
// Element-wise (Hadamard) product of a 29-element vector with each row of a
// 4x29 batch. The batch rows reuse the vector's magnitudes with different sign
// patterns, so each expected row is +/- the element-wise square of the vector.
// vecSize = 29 and a trailing 0.0f presumably exercise non-aligned tail
// handling in the implementation — TODO confirm against the impl.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
          11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
          21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
          /* batch 1 */
          -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
          -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
          -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
          /* batch 2 */
          1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
          11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
          21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
          /* batch 3 */
          -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
          -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
          -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expect output = vector * batchVector (element-wise product, no
    // accumulation): e.g. batch 0 equals the vector, so its expected row is
    // the vector squared (1.1*1.1 = 1.21, 2.2*2.2 = 4.84, ...).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
          1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
          59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
          172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
          368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
          637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
          /* batch 1 */
          -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
          -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
          -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
          -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
          -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
          /* batch 2 */
          1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
          59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
          172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
          368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
          637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
          /* batch 3 */
          -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
          -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
          -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
          -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
          -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
1877
1878
1879void LstmUtilsVectorBatchVectorAddTest()
1880{
1881 uint32_t batchSize = 2;
1882 uint32_t vecSize = 3;
1883 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1884 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1885 { 0.0f, -0.5f, 1.0f}));
1886
1887 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1888 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1889 { 1.0f, 2.0f, 3.0f, //batch 0
1890 4.0f, 5.0f, 6.0f})); //batch 1
1891
1892 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1893 { 1.0f, 1.5f, 4.0f,
1894 4.0f, 4.5f, 7.0f}));
1895
1896 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1897 vecSize, batchSize, expectedOutput);
1898}
1899
1900
telsoa01c577f2c2018-08-31 09:22:23 +01001901LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001902 armnn::IWorkloadFactory& workloadFactory,
1903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001905 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001906 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1907 { 2., 3., 3., 4. }));
1908
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001909 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001910 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1911 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1912 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001913 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001914 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001915}
1916
// FP32 LSTM with peephole and projection but no CIFG:
// 2 batches x 5 inputs -> 2 batches x 16 (projected) outputs.
// Expected values are golden references baked into the test data.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1938
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001939LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1940 armnn::IWorkloadFactory& workloadFactory,
1941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001942{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001943 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001944 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1945 {2., 3., 3., 4.}));
1946
1947
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001948 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001949 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1950 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1951 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1952
Conor Kennedyb9971c92019-05-07 07:14:23 +01001953 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001954 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001955}
1956
Jan Eilers38e05bd2019-06-26 13:10:09 +01001957
1958LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1959 armnn::IWorkloadFactory& workloadFactory,
1960 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1961{
1962 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1963 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1964 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
1965 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
1966
1967 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
1968 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1969 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
1970 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
1971 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
1972 workloadFactory, memoryManager, input, expectedOutput);
1973}
1974
1975
Conor Kennedyb9971c92019-05-07 07:14:23 +01001976LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1977 armnn::IWorkloadFactory& workloadFactory,
1978 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1979{
1980 const float qScale = 1.0f;
1981 const int32_t qOffset = 0;
1982
1983 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1984 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1985
1986 armnn::TensorInfo inputDesc({2, 2}, datatype);
1987 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1988 std::vector<float>{2., 3., 3., 4.}));
1989
1990 armnn::TensorInfo outputDesc({2, 4}, datatype);
1991 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1992 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1993 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1994
1995 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1996 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1997
1998}
1999
2000LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
2001 armnn::IWorkloadFactory& workloadFactory,
2002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2003{
2004 const float qScale = 1.0f;
2005 const int32_t qOffset = 0;
2006
2007 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2008 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2009
2010 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
2011 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
2012 std::vector<float>({ 2., 3., 3., 4. })));
2013
2014 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
2015 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2016 qOffset, std::vector<float>(
2017 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2018 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
2019
2020 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2021 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2022}
2023
// QSymm16 LSTM with peephole and projection (no CIFG); QAsymm8 constant
// weights. Quantization scale 2.0 / offset 0; the float golden values are
// quantized on the fly via QuantizedVector.
// 2 batches x 5 inputs -> 2 batches x 16 (projected) outputs.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2053
2054LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2055 armnn::IWorkloadFactory& workloadFactory,
2056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2057{
2058 const float qScale = 1.0f;
2059 const int32_t qOffset = 0;
2060
2061 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2062
2063 armnn::TensorInfo inputDesc({2, 2}, datatype);
2064 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2065 qOffset, std::vector<float>{2., 3., 3., 4.}));
2066
2067 armnn::TensorInfo outputDesc({2, 4}, datatype);
2068 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2069 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2070 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2071
2072 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2073 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2074}
2075
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Concatenates a 2-channel [2,6,3] tensor and a 1-channel [1,6,3] tensor along
    // the channel dimension into a 3-channel [3,6,3] output. Where the backend
    // supports sub-tensors, the inputs are created as views directly into the
    // output tensor at their view origins; otherwise standalone input tensors are used.
    // NOTE(review): memoryManager is unused here - presumably kept for signature
    // consistency with the other layer tests.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: input1's two channels (values 1..36) followed by input2's
    // single channel (values 37..54).
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // The second view starts at channel 2, i.e. immediately after input1's two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Sub-tensor path: each input handle aliases the output tensor at its view
    // origin, so the concat can be performed without an explicit copy.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The descriptor carries one view origin per input, in input order.
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Allocation must precede the data copies and workload configuration below.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
2198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002199LayerTestResult<float,4> AdditionTest(
2200 armnn::IWorkloadFactory& workloadFactory,
2201 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002202{
2203 unsigned int batchSize = 2;
2204 unsigned int channels = 2;
2205 unsigned int height = 2;
2206 unsigned int width = 3;
2207
2208 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
2209 armnn::TensorInfo outputTensorInfo;
2210
2211 unsigned int shape[] = {batchSize, channels, height, width};
2212
2213 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2214 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2215 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2216
2217
2218 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
2219 {
2220 0.0f, 2.0f, 1.0f,
2221 0.2f, 1.0f, 2.0f,
2222
2223 1.0f, 2.0f, 1.0f,
2224 0.2f, 1.0f, 2.0f,
2225
2226 0.0f, 2.0f, 1.0f,
2227 4.2f, 1.0f, 2.0f,
2228
2229 0.0f, 0.0f, 1.0f,
2230 0.2f, 1.0f, 2.0f,
2231 }));
2232
2233 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
2234 {
2235 1.0f, 2.0f, 1.0f,
2236 0.0f, 1.0f, 2.0f,
2237
2238 1.0f, 2.0f, -2.0f,
2239 0.2f, 1.0f, 2.0f,
2240
2241 0.0f, 2.0f, 1.0f,
2242 4.2f, 0.0f, -3.0f,
2243
2244 0.0f, 0.0f, 1.0f,
2245 0.7f, 1.0f, 5.0f,
2246 }));
2247
2248 LayerTestResult<float,4> ret(outputTensorInfo);
2249 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
2250 {
2251 1.0f, 4.0f, 2.0f,
2252 0.2f, 2.0f, 4.0f,
2253
2254 2.0f, 4.0f, -1.0f,
2255 0.4f, 2.0f, 4.0f,
2256
2257 0.0f, 4.0f, 2.0f,
2258 8.4f, 1.0f, -1.0f,
2259
2260 0.0f, 0.0f, 2.0f,
2261 0.9f, 2.0f, 7.0f,
2262 }));
2263
2264 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2265 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
2266 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2267
2268 armnn::AdditionQueueDescriptor data;
2269 armnn::WorkloadInfo info;
2270 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2271 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2272 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2273
2274 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
2275
2276 inputHandle1->Allocate();
2277 inputHandle2->Allocate();
2278 outputHandle->Allocate();
2279
2280 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2281 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
2282
Derek Lambertif30f7d32019-04-09 10:25:02 +01002283 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00002284 workload->Execute();
2285
2286 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2287
2288 return ret;
2289}
2290
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Adds a [1,3,2,1] tensor and a [1,1,2,3] tensor, each broadcast to the
    // common [1,3,2,3] output shape. qScale/qOffset are applied to all three
    // tensor infos only when T is a quantized type.
    // NOTE(review): memoryManager is unused here - presumably kept for signature
    // consistency with the other layer tests.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: each pair of input1 values added across input2's 2x3 block.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2369
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Adds a single-element [1,1,1,1] tensor (value 0.5) to every element of a
    // [1,3,2,3] tensor via broadcasting. qScale/qOffset are applied to all three
    // tensor infos only when T is a quantized type.
    // NOTE(review): memoryManager is unused here - presumably kept for signature
    // consistency with the other layer tests.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: input1 with 0.5 added to every element.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
2443
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002444LayerTestResult<float, 4> AdditionBroadcastTest(
2445 armnn::IWorkloadFactory& workloadFactory,
2446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002447{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002448 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
2449 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002450}
2451
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002452LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
2453 armnn::IWorkloadFactory& workloadFactory,
2454 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002455{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002456 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
2457 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002458}
2459
Sadik Armagan2999a022019-04-09 14:20:12 +01002460LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
2461 armnn::IWorkloadFactory& workloadFactory,
2462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2463{
2464 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
2465 workloadFactory, memoryManager, 2.f, 0);
2466}
2467
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002468LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
2469 armnn::IWorkloadFactory& workloadFactory,
2470 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002471{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002472 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
2473 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002474}
2475
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002476LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
2477 armnn::IWorkloadFactory& workloadFactory,
2478 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002479{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002480 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
2481 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00002482}
2483
Sadik Armagan2999a022019-04-09 14:20:12 +01002484LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
2485 armnn::IWorkloadFactory& workloadFactory,
2486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2487{
2488 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
2489 workloadFactory, memoryManager, 0.1333333f, 0);
2490}
2491
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // Runs the same addition on workloadFactory (the backend under test) and on
    // refWorkloadFactory (the reference backend) over identical random data. The
    // reference backend's result is stored as outputExpected so the caller can
    // compare the two implementations.
    // NOTE(review): memoryManager is unused here - presumably kept for signature
    // consistency with the other layer tests.
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // The integer arguments presumably seed the random data, keeping the
    // "random" tensors deterministic across runs - TODO confirm in MakeRandomTensor.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, then repoint its tensor
    // handles at the reference backend's handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2561
namespace {
// Shared driver for the division layer tests. Builds two input tensors and an
// expected-output tensor - each with its own quantization scale/offset - runs a
// Division workload on the supplied factory, and returns the actual output
// alongside the expected values for the caller to compare.
// NOTE(review): memoryManager is unused here - presumably kept so all test
// helpers share the same signature.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Quantization parameters are set unconditionally here (unlike the addition
    // helpers, which guard on IsQuantizedType) - TensorInfo presumably ignores
    // them for float types; TODO confirm.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Allocation must precede the data copies and workload configuration below.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
2626
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002627LayerTestResult<float,4> DivisionByZeroTest(
2628 armnn::IWorkloadFactory& workloadFactory,
2629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002630{
2631 const unsigned int width = 2;
2632 const unsigned int height = 2;
2633 const unsigned int channelCount = 2;
2634 const unsigned int batchSize = 2;
2635
2636 unsigned int shape[] = { batchSize, channelCount, height, width };
2637
2638 std::vector<float> input0({
2639 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
2640 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
2641
2642 std::vector<float> input1({
2643 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
2644 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
2645
2646 std::vector<float> output({
2647 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
2648 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
2649
Sadik Armagan2999a022019-04-09 14:20:12 +01002650 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2651 memoryManager,
2652 shape, input0, 1.0f, 0,
2653 shape, input1, 1.0f, 0,
2654 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01002655}
2656
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002657LayerTestResult<float,4> DivisionTest(
2658 armnn::IWorkloadFactory& workloadFactory,
2659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002660{
2661 const unsigned int width = 2;
2662 const unsigned int height = 2;
2663 const unsigned int channelCount = 2;
2664 const unsigned int batchSize = 2;
2665
2666 unsigned int shape[] = { batchSize, channelCount, height, width };
2667
2668 std::vector<float> input0({
2669 2, 2, 2, 2, 3, 3, 3, 3,
2670 4, 4, 4, 4, 5, 5, 5, 5 });
2671
2672 std::vector<float> input1({
2673 1, 1, 1, 1, 2, 2, 2, 2,
2674 4, 4, 4, 4, 4, 4, 4, 4 });
2675
2676 std::vector<float> output({
2677 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
2678 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
2679
David Beck5cd01f32018-09-12 16:00:08 +01002680
Sadik Armagan2999a022019-04-09 14:20:12 +01002681 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2682 memoryManager,
2683 shape, input0, 1.0f, 0,
2684 shape, input1, 1.0f, 0,
2685 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002686}
2687
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002688LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
2689 armnn::IWorkloadFactory& workloadFactory,
2690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002691{
2692 unsigned int shape0[] = { 1, 2, 2, 2 };
2693 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2694
2695 unsigned int shape1[] = { 1, 1, 1, 1 };
2696 std::vector<float> input1({ 2 });
2697
2698 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2699
David Beck5cd01f32018-09-12 16:00:08 +01002700
Sadik Armagan2999a022019-04-09 14:20:12 +01002701 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2702 memoryManager,
2703 shape0, input0, 1.0f, 0,
2704 shape1, input1, 1.0f, 0,
2705 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002706}
2707
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002708LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
2709 armnn::IWorkloadFactory& workloadFactory,
2710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002711{
2712 unsigned int shape0[] = { 1, 3, 3, 2 };
2713 std::vector<float> input0({
2714 1, 4, 3, 8, 5, 12,
2715 7, 16, 9, 20, 11, 24,
2716 13, 28, 15, 32, 17, 36});
2717
2718 unsigned int shape1[] = { 1, 1, 1, 2 };
2719 std::vector<float> input1({ 1, 2 });
2720
2721 std::vector<float> output({
2722 1, 2, 3, 4, 5, 6,
2723 7, 8, 9, 10, 11, 12,
2724 13, 14, 15, 16, 17, 18});
2725
Sadik Armagan2999a022019-04-09 14:20:12 +01002726 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
2727 memoryManager,
2728 shape0, input0, 1.0f, 0,
2729 shape1, input1, 1.0f, 0,
2730 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002731}
2732
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002733LayerTestResult<uint8_t,4> DivisionUint8Test(
2734 armnn::IWorkloadFactory& workloadFactory,
2735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002736{
2737 const unsigned int width = 2;
2738 const unsigned int height = 2;
2739 const unsigned int channelCount = 2;
2740 const unsigned int batchSize = 2;
2741
2742 unsigned int shape[] = { batchSize, channelCount, height, width };
2743
2744 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2745 4, 4, 4, 4, 5, 5, 5, 5 });
2746
2747 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2748 4, 4, 4, 4, 4, 4, 4, 4 });
2749
2750 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2751 4, 4, 4, 4, 5, 5, 5, 5});
2752
2753
Sadik Armagan2999a022019-04-09 14:20:12 +01002754 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2755 memoryManager,
2756 shape, input0, 1.0f, 0,
2757 shape, input1, 1.0f, 0,
2758 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002759}
2760
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002761LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
2762 armnn::IWorkloadFactory& workloadFactory,
2763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002764{
2765 unsigned int shape0[] = { 1, 2, 2, 2 };
2766 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2767
2768 unsigned int shape1[] = { 1, 1, 1, 1 };
2769 std::vector<uint8_t> input1({ 2 });
2770
2771 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2772
Sadik Armagan2999a022019-04-09 14:20:12 +01002773 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2774 memoryManager,
2775 shape0, input0, 1.0f, 0,
2776 shape1, input1, 1.0f, 0,
2777 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002778}
2779
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002780LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2781 armnn::IWorkloadFactory& workloadFactory,
2782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002783{
2784 unsigned int shape0[] = { 1, 3, 3, 2 };
2785 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2786 7, 16, 9, 20, 11, 24,
2787 13, 28, 15, 32, 17, 36});
2788
2789 unsigned int shape1[] = { 1, 1, 1, 2 };
2790 std::vector<uint8_t> input1({ 1, 2 });
2791
2792 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2793 7, 8, 9, 10, 11, 12,
2794 13, 14, 15, 16, 17, 18});
2795
Sadik Armagan2999a022019-04-09 14:20:12 +01002796 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2797 memoryManager,
2798 shape0, input0, 1.0f, 0,
2799 shape1, input1, 1.0f, 0,
2800 shape0, output, 1.0f, 0);
2801}
2802
2803LayerTestResult<int16_t,4> DivisionInt16Test(
2804 armnn::IWorkloadFactory& workloadFactory,
2805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2806{
2807 unsigned int shape[] = { 2, 2, 2, 2 };
2808
2809 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2810 4, 4, 4, 4, 5, 5, 5, 5 });
2811
2812 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2813 4, 4, 4, 4, 4, 4, 4, 4 });
2814
2815 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2816 4, 4, 4, 4, 5, 5, 5, 5});
2817
2818
2819 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2820 memoryManager,
2821 shape, input0, 1.0f, 0,
2822 shape, input1, 1.0f, 0,
2823 shape, output, 0.25f, 0);
2824}
2825
2826LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2827 armnn::IWorkloadFactory& workloadFactory,
2828 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2829{
2830 unsigned int shape0[] = { 1, 2, 2, 2 };
2831 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2832
2833 unsigned int shape1[] = { 1, 1, 1, 1 };
2834 std::vector<int16_t> input1({ 2 });
2835
2836 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2837
2838 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2839 memoryManager,
2840 shape0, input0, 1.0f, 0,
2841 shape1, input1, 1.0f, 0,
2842 shape0, output, 1.0f, 0);
2843}
2844
2845LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2846 armnn::IWorkloadFactory& workloadFactory,
2847 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2848{
2849 unsigned int shape0[] = { 1, 3, 3, 2 };
2850 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2851 7, 16, 9, 20, 11, 24,
2852 13, 28, 15, 32, 17, 36});
2853
2854 unsigned int shape1[] = { 1, 1, 1, 2 };
2855 std::vector<int16_t> input1({ 1, 2 });
2856
2857 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2858 7, 8, 9, 10, 11, 12,
2859 13, 14, 15, 16, 17, 18});
2860
2861 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2862 memoryManager,
2863 shape0, input0, 1.0f, 0,
2864 shape1, input1, 1.0f, 0,
2865 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002866}
2867
// Generic hook used by ElementwiseTestHelper to create the workload for a given
// queue-descriptor type. Each supported descriptor must provide an explicit
// specialisation below that forwards to the matching factory method.
// NOTE(review): the primary template's body calls itself, so instantiating it
// with a descriptor type that has no explicit specialisation recurses without
// bound at runtime instead of failing to compile — confirm this is intentional.
template<typename DescriptorType>
std::unique_ptr<armnn::IWorkload> CreateWorkload(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const DescriptorType& descriptor)
{
    return CreateWorkload(workloadFactory, info, descriptor);
};
2876
// Specialisation for Maximum: forwards to the factory's CreateMaximum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
2885
// Specialisation for Minimum: forwards to the factory's CreateMinimum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
2894
// Specialisation for Equal: forwards to the factory's CreateEqual.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
2903
// Specialisation for Greater: forwards to the factory's CreateGreater.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
2912
namespace {

// Runs a binary element-wise workload (selected by Descriptor) on two input
// tensors and returns the produced output together with the expected values,
// ready for comparison by the test framework.
//
// Template parameters:
//   Descriptor      - queue descriptor type; dispatched through the
//                     CreateWorkload specialisations above.
//   ArmnnTypeInput  - armnn::DataType of both inputs.
//   ArmnnTypeOutput - armnn::DataType of the output (e.g. Boolean for
//                     comparison workloads).
// Runtime parameters:
//   shape0/values0, shape1/values1 - the two 4-D inputs.
//   outShape/outValues             - expected output shape and data.
//   qScale/qOffset                 - quantization info applied to all three
//                                    tensors when TInput is a quantized type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantization info is only meaningful for quantized input types; the same
    // scale/offset is applied to both inputs and the output.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs (comparison workloads) are compared as booleans rather
    // than exact values.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Order matters: allocate, copy inputs, configure, then execute.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for workloads whose output type equals the input type
// (e.g. Maximum/Minimum); forwards to the two-type helper above.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
3004
3005LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003007{
3008 const unsigned int width = 2;
3009 const unsigned int height = 2;
3010 const unsigned int channelCount = 2;
3011 const unsigned int batchSize = 2;
3012
3013 unsigned int shape[] = { batchSize, channelCount, height, width };
3014
3015 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3016 3, 3, 3, 3, 4, 4, 4, 4 });
3017
3018 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3019 5, 5, 5, 5, 4, 4, 4, 4 });
3020
kevmay012b4d88e2019-01-24 14:05:09 +00003021 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
3022 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003023
kevmay012b4d88e2019-01-24 14:05:09 +00003024 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003025 workloadFactory,
3026 memoryManager,
3027 shape,
3028 input0,
3029 shape,
3030 input1,
3031 shape,
3032 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003033}
3034
kevmay012b4d88e2019-01-24 14:05:09 +00003035LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003036 armnn::IWorkloadFactory& workloadFactory,
3037 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3038{
3039 unsigned int shape0[] = { 1, 2, 2, 2 };
3040 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3041
3042 unsigned int shape1[] = { 1, 1, 1, 1 };
3043 std::vector<float> input1({ 1 });
3044
kevmay012b4d88e2019-01-24 14:05:09 +00003045 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003046
kevmay012b4d88e2019-01-24 14:05:09 +00003047 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003048 workloadFactory,
3049 memoryManager,
3050 shape0,
3051 input0,
3052 shape1,
3053 input1,
3054 shape0,
3055 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003056}
3057
kevmay012b4d88e2019-01-24 14:05:09 +00003058LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003059 armnn::IWorkloadFactory& workloadFactory,
3060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3061{
3062 const unsigned int shape0[] = { 1, 2, 2, 3 };
3063 const unsigned int shape1[] = { 1, 1, 1, 3 };
3064
3065 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3066 7, 8, 9, 10, 11, 12 });
3067
3068 std::vector<float> input1({ 1, 2, 3});
3069
kevmay012b4d88e2019-01-24 14:05:09 +00003070 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
3071 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003072
kevmay012b4d88e2019-01-24 14:05:09 +00003073 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003074 workloadFactory,
3075 memoryManager,
3076 shape0,
3077 input0,
3078 shape1,
3079 input1,
3080 shape0,
3081 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003082}
3083
3084LayerTestResult<uint8_t, 4> EqualUint8Test(
3085 armnn::IWorkloadFactory& workloadFactory,
3086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3087{
3088 unsigned int shape[] = { 2, 2, 2, 2 };
3089
3090 // See dequantized values to the right.
3091 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003092 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003093
3094 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3095 3, 3, 3, 3, 5, 5, 5, 5 });
3096
3097 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3098 1, 1, 1, 1, 0, 0, 0, 0 });
3099
kevmay012b4d88e2019-01-24 14:05:09 +00003100 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3101 armnn::DataType::QuantisedAsymm8,
3102 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003103 workloadFactory,
3104 memoryManager,
3105 shape,
3106 input0,
3107 shape,
3108 input1,
3109 shape,
3110 output,
3111 1.0f,
3112 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003113}
3114
3115LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
3116 armnn::IWorkloadFactory& workloadFactory,
3117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3118{
3119 const unsigned int shape0[] = { 1, 2, 2, 3 };
3120 const unsigned int shape1[] = { 1, 1, 1, 1 };
3121
3122 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3123 7, 8, 9, 10, 11, 12 });
3124
3125 std::vector<uint8_t> input1({ 1 });
3126
3127 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
3128 0, 0, 0, 0, 0, 0 });
3129
kevmay012b4d88e2019-01-24 14:05:09 +00003130 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3131 armnn::DataType::QuantisedAsymm8,
3132 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003133 workloadFactory,
3134 memoryManager,
3135 shape0,
3136 input0,
3137 shape1,
3138 input1,
3139 shape0,
3140 output,
3141 1.0f,
3142 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003143}
3144
3145LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
3146 armnn::IWorkloadFactory& workloadFactory,
3147 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3148{
3149 const unsigned int shape0[] = { 1, 2, 2, 3 };
3150 const unsigned int shape1[] = { 1, 1, 1, 3 };
3151
3152 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3153 7, 8, 9, 10, 11, 12 });
3154
3155 std::vector<uint8_t> input1({ 1, 1, 3});
3156
3157 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
3158 0, 0, 0, 0, 0, 0 });
3159
kevmay012b4d88e2019-01-24 14:05:09 +00003160 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
3161 armnn::DataType::QuantisedAsymm8,
3162 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003163 workloadFactory,
3164 memoryManager,
3165 shape0,
3166 input0,
3167 shape1,
3168 input1,
3169 shape0,
3170 output,
3171 1.0f,
3172 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00003173}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003174
kevmay012b4d88e2019-01-24 14:05:09 +00003175LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00003176 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3177{
3178 const unsigned int width = 2;
3179 const unsigned int height = 2;
3180 const unsigned int channelCount = 2;
3181 const unsigned int batchSize = 2;
3182
3183 unsigned int shape[] = { batchSize, channelCount, height, width };
3184
3185 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3186 3, 3, 3, 3, 4, 4, 4, 4 });
3187
3188 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
3189 5, 5, 5, 5, 4, 4, 4, 4 });
3190
kevmay012b4d88e2019-01-24 14:05:09 +00003191 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
3192 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003193
kevmay012b4d88e2019-01-24 14:05:09 +00003194 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003195 workloadFactory,
3196 memoryManager,
3197 shape,
3198 input0,
3199 shape,
3200 input1,
3201 shape,
3202 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003203}
3204
kevmay012b4d88e2019-01-24 14:05:09 +00003205LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003206 armnn::IWorkloadFactory& workloadFactory,
3207 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3208{
3209 unsigned int shape0[] = { 1, 2, 2, 2 };
3210 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3211
3212 unsigned int shape1[] = { 1, 1, 1, 1 };
3213 std::vector<float> input1({ 1 });
3214
kevmay012b4d88e2019-01-24 14:05:09 +00003215 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00003216
kevmay012b4d88e2019-01-24 14:05:09 +00003217 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003218 workloadFactory,
3219 memoryManager,
3220 shape0,
3221 input0,
3222 shape1,
3223 input1,
3224 shape0,
3225 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003226}
3227
kevmay012b4d88e2019-01-24 14:05:09 +00003228LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00003229 armnn::IWorkloadFactory& workloadFactory,
3230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3231{
3232 const unsigned int shape0[] = { 1, 2, 2, 3 };
3233 const unsigned int shape1[] = { 1, 1, 1, 3 };
3234
3235 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
3236 7, 8, 9, 10, 11, 12 });
3237
3238 std::vector<float> input1({ 1, 3, 2});
3239
kevmay012b4d88e2019-01-24 14:05:09 +00003240 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
3241 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00003242
kevmay012b4d88e2019-01-24 14:05:09 +00003243 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003244 workloadFactory,
3245 memoryManager,
3246 shape0,
3247 input0,
3248 shape1,
3249 input1,
3250 shape0,
3251 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003252}
3253
3254LayerTestResult<uint8_t, 4> GreaterUint8Test(
3255 armnn::IWorkloadFactory& workloadFactory,
3256 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3257{
3258 unsigned int shape[] = { 2, 2, 2, 2 };
3259
3260 // See dequantized values to the right.
3261 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3262 3, 3, 3, 3, 5, 5, 5, 5 });
3263
3264 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
3265 2, 2, 2, 2, 5, 5, 5, 5 });
3266
3267 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
3268 1, 1, 1, 1, 0, 0, 0, 0 });
3269
kevmay012b4d88e2019-01-24 14:05:09 +00003270 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3271 armnn::DataType::QuantisedAsymm8,
3272 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003273 workloadFactory,
3274 memoryManager,
3275 shape,
3276 input0,
3277 shape,
3278 input1,
3279 shape,
3280 output,
3281 1.0f,
3282 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003283}
3284
3285LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
3286 armnn::IWorkloadFactory& workloadFactory,
3287 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3288{
3289 const unsigned int shape0[] = { 1, 2, 2, 3 };
3290 const unsigned int shape1[] = { 1, 1, 1, 1 };
3291
3292 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3293 7, 8, 9, 10, 11, 12 });
3294
3295 std::vector<uint8_t> input1({ 1 });
3296
3297 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
3298 1, 1, 1, 1, 1, 1 });
3299
kevmay012b4d88e2019-01-24 14:05:09 +00003300 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3301 armnn::DataType::QuantisedAsymm8,
3302 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003303 workloadFactory,
3304 memoryManager,
3305 shape0,
3306 input0,
3307 shape1,
3308 input1,
3309 shape0,
3310 output,
3311 1.0f,
3312 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003313}
3314
3315LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
3316 armnn::IWorkloadFactory& workloadFactory,
3317 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3318{
3319 const unsigned int shape0[] = { 1, 2, 2, 3 };
3320 const unsigned int shape1[] = { 1, 1, 1, 3 };
3321
3322 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3323 7, 8, 9, 10, 11, 12 });
3324
3325 std::vector<uint8_t> input1({ 1, 1, 3});
3326
3327 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
3328 1, 1, 1, 1, 1, 1 });
3329
kevmay012b4d88e2019-01-24 14:05:09 +00003330 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
3331 armnn::DataType::QuantisedAsymm8,
3332 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003333 workloadFactory,
3334 memoryManager,
3335 shape0,
3336 input0,
3337 shape1,
3338 input1,
3339 shape0,
3340 output,
3341 1.0f,
3342 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00003343}
3344
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003345LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
3346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3347{
3348 const unsigned int width = 2;
3349 const unsigned int height = 2;
3350 const unsigned int channelCount = 2;
3351 const unsigned int batchSize = 2;
3352
3353 unsigned int shape[] = { batchSize, channelCount, height, width };
3354
3355 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
3356 3, 3, 3, 3, 4, 4, 4, 4 });
3357
3358 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3359 4, 4, 4, 4, 5, 5, 5, 5 });
3360
3361 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
3362 4, 4, 4, 4, 5, 5, 5, 5 });
3363
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003364 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3365 workloadFactory,
3366 memoryManager,
3367 shape,
3368 input0,
3369 shape,
3370 input1,
3371 shape,
3372 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003373}
3374
3375LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
3376 armnn::IWorkloadFactory& workloadFactory,
3377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3378{
3379 unsigned int shape0[] = { 1, 2, 2, 2 };
3380 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3381
3382 unsigned int shape1[] = { 1, 1, 1, 1 };
3383 std::vector<float> input1({ 2 });
3384
3385 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
3386
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003387 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3388 workloadFactory,
3389 memoryManager,
3390 shape0,
3391 input0,
3392 shape1,
3393 input1,
3394 shape0,
3395 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003396}
3397
3398LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
3399 armnn::IWorkloadFactory& workloadFactory,
3400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3401{
3402 const unsigned int shape0[] = { 1, 2, 2, 3 };
3403 const unsigned int shape1[] = { 1, 1, 1, 3 };
3404
3405 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
3406 7, 8, 9, 10, 11, 12 });
3407
3408 std::vector<float> input1({ 1, 2, 3});
3409
3410 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00003411 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003412
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003413 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
3414 workloadFactory,
3415 memoryManager,
3416 shape0,
3417 input0,
3418 shape1,
3419 input1,
3420 shape0,
3421 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003422}
3423
3424LayerTestResult<uint8_t, 4> MaximumUint8Test(
3425 armnn::IWorkloadFactory& workloadFactory,
3426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3427{
3428 unsigned int shape[] = { 2, 2, 2, 2 };
3429
3430 // See dequantized values to the right.
3431 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3432 3, 3, 3, 3, 4, 4, 4, 4 });
3433
3434 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3435 4, 4, 4, 4, 5, 5, 5, 5 });
3436
3437 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3438 4, 4, 4, 4, 5, 5, 5, 5 });
3439
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003440 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3441 workloadFactory,
3442 memoryManager,
3443 shape,
3444 input0,
3445 shape,
3446 input1,
3447 shape,
3448 output,
3449 1.0f,
3450 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003451}
3452
3453LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
3454 armnn::IWorkloadFactory& workloadFactory,
3455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3456{
3457 const unsigned int shape0[] = { 1, 2, 2, 3 };
3458 const unsigned int shape1[] = { 1, 1, 1, 1 };
3459
3460 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3461 7, 8, 9, 10, 11, 12 });
3462
3463 std::vector<uint8_t> input1({2});
3464
3465 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
3466 7, 8, 9, 10, 11, 12 });
3467
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003468 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3469 workloadFactory,
3470 memoryManager,
3471 shape0,
3472 input0,
3473 shape1,
3474 input1,
3475 shape0,
3476 output,
3477 1.0f,
3478 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003479}
3480
3481LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
3482 armnn::IWorkloadFactory& workloadFactory,
3483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3484{
3485 const unsigned int shape0[] = { 1, 2, 2, 3 };
3486 const unsigned int shape1[] = { 1, 1, 1, 3 };
3487
3488 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
3489 7, 8, 9, 10, 11, 12 });
3490
3491 std::vector<uint8_t> input1({ 1, 10, 3});
3492
3493 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
3494 7, 10, 9, 10, 11, 12 });
3495
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003496 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3497 workloadFactory,
3498 memoryManager,
3499 shape0,
3500 input0,
3501 shape1,
3502 input1,
3503 shape0,
3504 output,
3505 1.0f,
3506 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00003507}
3508
Sadik Armagan2999a022019-04-09 14:20:12 +01003509LayerTestResult<int16_t, 4> MaximumInt16Test(
3510 armnn::IWorkloadFactory& workloadFactory,
3511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3512{
3513 unsigned int shape[] = { 2, 2, 2, 2 };
3514
3515 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3516 3, 3, 3, 3, 4, 4, 4, 4 });
3517
3518 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3519 4, 4, 4, 4, 5, 5, 5, 5 });
3520
3521 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
3522 4, 4, 4, 4, 5, 5, 5, 5 });
3523
3524 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3525 workloadFactory,
3526 memoryManager,
3527 shape,
3528 input0,
3529 shape,
3530 input1,
3531 shape,
3532 output,
3533 1.0f,
3534 0);
3535}
3536
3537LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
3538 armnn::IWorkloadFactory& workloadFactory,
3539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3540{
3541 const unsigned int shape0[] = { 1, 2, 2, 3 };
3542 const unsigned int shape1[] = { 1, 1, 1, 1 };
3543
3544 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3545 7, 8, 9, 10, 11, 12 });
3546
3547 std::vector<int16_t> input1({2});
3548
3549 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
3550 7, 8, 9, 10, 11, 12 });
3551
3552 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3553 workloadFactory,
3554 memoryManager,
3555 shape0,
3556 input0,
3557 shape1,
3558 input1,
3559 shape0,
3560 output,
3561 1.0f,
3562 0);
3563}
3564
3565LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
3566 armnn::IWorkloadFactory& workloadFactory,
3567 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3568{
3569 const unsigned int shape0[] = { 1, 2, 2, 3 };
3570 const unsigned int shape1[] = { 1, 1, 1, 3 };
3571
3572 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3573 7, 8, 9, 10, 11, 12 });
3574
3575 std::vector<int16_t> input1({ 1, 10, 3});
3576
3577 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
3578 7, 10, 9, 10, 11, 12 });
3579
3580 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3581 workloadFactory,
3582 memoryManager,
3583 shape0,
3584 input0,
3585 shape1,
3586 input1,
3587 shape0,
3588 output,
3589 1.0f,
3590 0);
3591}
3592
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003593LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
3594 armnn::IWorkloadFactory& workloadFactory,
3595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3596{
3597 unsigned int shape0[] = { 1, 2, 2, 2 };
3598 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3599
3600 unsigned int shape1[] = { 1, 1, 1, 1 };
3601 std::vector<float> input1({ 2 });
3602
3603 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
3604
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003605 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3606 workloadFactory,
3607 memoryManager,
3608 shape0,
3609 input0,
3610 shape1,
3611 input1,
3612 shape0,
3613 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003614}
3615
3616
3617LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
3618 armnn::IWorkloadFactory& workloadFactory,
3619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3620{
3621 unsigned int shape0[] = { 1, 2, 2, 2 };
3622 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
3623
3624 unsigned int shape1[] = { 1, 1, 1, 1 };
3625 std::vector<float> input1({ 5 });
3626
3627 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
3628
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003629 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
3630 workloadFactory,
3631 memoryManager,
3632 shape0,
3633 input0,
3634 shape1,
3635 input1,
3636 shape0,
3637 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003638}
3639
3640LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
3641 armnn::IWorkloadFactory & workloadFactory,
3642 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
3643{
3644 const unsigned int shape0[] = { 1, 2, 2, 3 };
3645 const unsigned int shape1[] = { 1, 1, 1, 3 };
3646
3647 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
3648 7, 1, 2, 3, 4, 5 });
3649
3650 std::vector<uint8_t> input1({ 1, 2, 3});
3651
3652 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
3653 1, 1, 2, 1, 2, 3 });
3654
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003655 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
3656 workloadFactory,
3657 memoryManager,
3658 shape0,
3659 input0,
3660 shape1,
3661 input1,
3662 shape0,
3663 output,
3664 1.0f,
3665 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00003666}
3667
Sadik Armagan2999a022019-04-09 14:20:12 +01003668LayerTestResult<int16_t, 4> MinimumInt16Test(
3669 armnn::IWorkloadFactory& workloadFactory,
3670 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3671{
3672 unsigned int shape[] = { 2, 2, 2, 2 };
3673
3674 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
3675 3, 3, 3, 3, 4, 4, 4, 4 });
3676
3677 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
3678 4, 4, 4, 4, 5, 5, 5, 5 });
3679
3680 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
3681 3, 3, 3, 3, 4, 4, 4, 4 });
3682
3683 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3684 workloadFactory,
3685 memoryManager,
3686 shape,
3687 input0,
3688 shape,
3689 input1,
3690 shape,
3691 output,
3692 1.0f,
3693 0);
3694}
3695
3696LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
3697 armnn::IWorkloadFactory& workloadFactory,
3698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3699{
3700 const unsigned int shape0[] = { 1, 2, 2, 3 };
3701 const unsigned int shape1[] = { 1, 1, 1, 1 };
3702
3703 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3704 7, 8, 9, 10, 11, 12 });
3705
3706 std::vector<int16_t> input1({2});
3707
3708 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
3709 2, 2, 2, 2, 2, 2 });
3710
3711 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3712 workloadFactory,
3713 memoryManager,
3714 shape0,
3715 input0,
3716 shape1,
3717 input1,
3718 shape0,
3719 output,
3720 1.0f,
3721 0);
3722}
3723
3724LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
3725 armnn::IWorkloadFactory& workloadFactory,
3726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3727{
3728 const unsigned int shape0[] = { 1, 2, 2, 3 };
3729 const unsigned int shape1[] = { 1, 1, 1, 3 };
3730
3731 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
3732 7, 8, 9, 10, 11, 12 });
3733
3734 std::vector<int16_t> input1({ 1, 10, 3});
3735
3736 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
3737 1, 8, 3, 1, 10, 3 });
3738
3739 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
3740 workloadFactory,
3741 memoryManager,
3742 shape0,
3743 input0,
3744 shape1,
3745 input1,
3746 shape0,
3747 output,
3748 1.0f,
3749 0);
3750}
3751
namespace {
// Runs a Multiplication workload with two Float32 4D inputs (values0/shape0,
// values1/shape1) and returns the computed output alongside the expected
// outValues/outShape for comparison by the caller.
// NOTE(review): memoryManager is part of the common test-helper signature but
// is not referenced in this body.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Backend-side handles for both inputs and the output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate backing memory before copying the input data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
3801
3802
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003803LayerTestResult<float,4> MultiplicationTest(
3804 armnn::IWorkloadFactory& workloadFactory,
3805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003806{
3807 const unsigned int width = 2;
3808 const unsigned int height = 2;
3809 const unsigned int channelCount = 2;
3810 const unsigned int batchSize = 2;
3811
3812 unsigned int shape[] = { batchSize, channelCount, height, width };
3813
3814 std::vector<float> input0({
3815 1, 1, 1, 1, 2, 2, 2, 2,
3816 3, 3, 3, 3, 4, 4, 4, 4 });
3817
3818 std::vector<float> input1({
3819 2, 2, 2, 2, 3, 3, 3, 3,
3820 4, 4, 4, 4, 5, 5, 5, 5 });
3821
3822 std::vector<float> output({
3823 2, 2, 2, 2, 6, 6, 6, 6,
3824 12, 12, 12, 12, 20, 20, 20, 20 });
3825
3826 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003827 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003828 shape,
3829 input0,
3830 shape,
3831 input1,
3832 shape,
3833 output);
3834}
3835
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003836LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3837 armnn::IWorkloadFactory& workloadFactory,
3838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003839{
3840 unsigned int shape0[] = { 1, 2, 2, 2 };
3841 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3842
3843 unsigned int shape1[] = { 1, 1, 1, 1 };
3844 std::vector<float> input1({ 2 });
3845
3846 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3847
3848 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003849 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003850 shape0,
3851 input0,
3852 shape1,
3853 input1,
3854 shape0,
3855 output);
3856}
3857
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003858LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3859 armnn::IWorkloadFactory& workloadFactory,
3860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003861{
3862 unsigned int shape0[] = { 1, 3, 3, 2 };
3863 std::vector<float> input0({
3864 1, 2, 3, 4, 5, 6,
3865 7, 8, 9, 10, 11, 12,
3866 13, 14, 15, 16, 17, 18});
3867
3868 unsigned int shape1[] = { 1, 1, 1, 2 };
3869 std::vector<float> input1({ 1, 2 });
3870
3871 std::vector<float> output({
3872 1, 4, 3, 8, 5, 12,
3873 7, 16, 9, 20, 11, 24,
3874 13, 28, 15, 32, 17, 36});
3875
3876 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003877 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003878 shape0,
3879 input0,
3880 shape1,
3881 input1,
3882 shape0,
3883 output);
3884}
telsoa014fcda012018-03-09 14:13:49 +00003885
// Runs the same Multiplication workload on workloadFactory and on
// refWorkloadFactory with identical random inputs; the caller compares the
// two outputs (output vs outputExpected) to validate the backend under test
// against the reference backend.
// NOTE(review): memoryManager is part of the common test signature but is not
// referenced in this body.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    // Handles for the backend under test...
    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // ...and for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, rebinding its handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive the same input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3955
// Runs the same BatchNormalization workload on workloadFactory and on
// refWorkloadFactory with identical random inputs and parameters (mean,
// variance, beta, gamma); the caller compares output vs outputExpected.
// NOTE(review): memoryManager is part of the common test signature but is not
// referenced in this body.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // Per-channel parameter tensors are 1D of length 'channels'.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the comparison deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    // Parameter tensors are held on the CPU and shared by both workloads.
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // Clone the descriptor for the reference run, rebinding its handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive the same input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
4038
// Runs a Permute workload that rearranges inputData according to 'mappings'
// and writes the result into outputData. On return, inputTensorInfo is
// updated in place to the permuted tensor info.
// NOTE(review): memoryManager is part of the common helper signature but is
// not referenced in this body.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Tell the caller what shape/layout the data now has.
    inputTensorInfo = outputTensorInfo;
}
4082
Jim Flynn825af452019-05-20 12:49:28 +01004083armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01004084 const std::vector<armnn::TensorInfo> & inputTensorInfos,
4085 unsigned int concatDim)
4086{
telsoa014fcda012018-03-09 14:13:49 +00004087 std::vector<armnn::TensorShape> shapes;
4088 shapes.reserve(inputTensorInfos.size());
4089 for (const armnn::TensorInfo& it: inputTensorInfos)
4090 {
4091 shapes.push_back(it.GetShape());
4092 }
surmeh013537c2c2018-05-18 16:31:43 +01004093
Jim Flynn825af452019-05-20 12:49:28 +01004094 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
4095 shapes.end(),
4096 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01004097}
4098
//
// Concatenation is only supported for the N and C dimensions for NCHW, and for the innermost dimension.
// In the case of fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one or the innermost dimension.
//
4104
// Returns true when the inputs must be permuted before concatenation because
// the requested concatDim is not one the implementation supports directly
// (see the note above this function).
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Permute when rank < 3, or for rank 3 when the concat axis is neither
    // the slowest-iterating dimension nor the innermost one.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
4130
4131armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
4132{
4133 unsigned int numDims = inputShape.GetNumDimensions();
4134 if (numDims >= 3)
4135 {
4136 // Nothing to do if the inputShape has at least 3 dimensions.
4137 return inputShape;
4138 }
4139
4140 std::vector<unsigned int> newDims(size_t(3), 1u);
4141 unsigned int expandedBy = 3 - numDims;
4142 for (unsigned int i=0; i<numDims; ++i)
4143 {
4144 newDims[expandedBy+i] = inputShape[i];
4145 }
4146 return armnn::TensorShape(3u, &newDims[0]);
4147}
4148
4149void Generate3dPermuteVectorForConcat(
4150 unsigned int numDimensions,
4151 unsigned int & concatDim,
4152 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
4153{
4154 BOOST_ASSERT_MSG(numDimensions <= 3,
4155 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01004156 unsigned int expandedBy = 3 - numDimensions;
4157 unsigned int expandedConcatAxis = concatDim + expandedBy;
4158
4159 if (expandedConcatAxis == 2)
4160 {
4161 concatDim = 0;
4162 armnn::PermutationVector forwardPermutation({1, 2, 0});
4163 armnn::PermutationVector reversePermutation({2, 0, 1});
4164 permutations = std::make_pair(forwardPermutation, reversePermutation);
4165 }
4166 else if (expandedConcatAxis == 1)
4167 {
4168 concatDim = 0;
4169 armnn::PermutationVector forwardPermutation({2, 0, 1});
4170 armnn::PermutationVector reversePermutation({1, 2, 0});
4171 permutations = std::make_pair(forwardPermutation, reversePermutation);
4172 }
4173 else
4174 {
4175 BOOST_ASSERT(expandedConcatAxis == 0);
4176 concatDim = 0;
4177 }
4178}
4179
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// On return: inputTensorInfos/inputData describe the permuted inputs
// (inputDataStorage owns the permuted copies), permuteVector holds the
// reverse permutation needed to undo the transform, concatDim is rewritten
// to the post-permutation axis, and outputTensorInfo is given the permuted
// output shape.
// NOTE(review): memoryManager is forwarded to PermuteTensorData only.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the permutation for every input.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        // Permute this input into its storage slot; newTensorInfo is updated
        // in place to the permuted info.
        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
4248
4249
//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// Reads the concatenated result out of inputDataHandle, applies
// permuteVector to it, and copies the restored data into 'data'.
// NOTE(review): memoryManager is forwarded to PermuteTensorData only.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    // Pull the raw (still permuted) result off the device handle.
    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
4288
4289template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004290void Concatenate(
4291 armnn::IWorkloadFactory& workloadFactory,
4292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4293 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
4294 std::initializer_list<T *> inputsOrig,
4295 const armnn::TensorInfo& outputTensorInfoOrig,
4296 T * output,
narpra015cdda352018-11-19 15:30:27 +00004297 unsigned int concatDim,
4298 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01004299{
4300 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
4301 if (output == nullptr)
4302 {
4303 // Nullptr is an error in the test. By returning without doing the permutation
4304 // I expect the caller to fail the test. It still makes sense to report this as
4305 // an assert for Debug builds.
4306 return;
4307 }
4308
telsoa01c577f2c2018-08-31 09:22:23 +01004309 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01004310 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
4311 std::vector<T *> inputs = inputsOrig;
4312 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
4313
4314 armnn::PermutationVector permuteVector{0, 1, 2};
4315
telsoa01c577f2c2018-08-31 09:22:23 +01004316 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01004317 std::vector<std::vector<T>> tmpInputDataStorage;
4318
4319 const size_t inputCount = inputTensorInfos.size();
4320
4321 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
4322
4323 if (needPermuteForConcat)
4324 {
4325 //
4326 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01004327 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01004328 //
4329 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004330 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004331 inputTensorInfos,
4332 inputs,
4333 tmpInputDataStorage,
4334 permuteVector,
4335 concatDim,
4336 outputTensorInfo);
4337 }
4338
narpra015cdda352018-11-19 15:30:27 +00004339 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00004340
4341 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
4342 inputHandles.reserve(inputCount);
4343
narpra015cdda352018-11-19 15:30:27 +00004344 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4345
Jim Flynne242f2d2019-05-22 14:24:13 +01004346 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01004347 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00004348 queueDescriptor.m_Parameters = viewsDescriptor;
4349
4350 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004351 {
narpra015cdda352018-11-19 15:30:27 +00004352 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
4353 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
4354 {
4355 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
4356 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
4357 }
telsoa014fcda012018-03-09 14:13:49 +00004358
narpra015cdda352018-11-19 15:30:27 +00004359 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00004360
narpra015cdda352018-11-19 15:30:27 +00004361 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
4362 for (unsigned int i = 0; i < inputCount; ++i)
4363 {
4364 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
4365 std::unique_ptr<armnn::ITensorHandle> inputHandle =
4366 subTensorsSupported ?
4367 workloadFactory.CreateSubTensorHandle(*outputHandle,
4368 inputTensorInfo.GetShape(),
4369 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
4370 workloadFactory.CreateTensorHandle(inputTensorInfo);
4371
4372 inputHandles.emplace_back(std::move(inputHandle));
4373 }
4374
telsoa014fcda012018-03-09 14:13:49 +00004375 }
narpra015cdda352018-11-19 15:30:27 +00004376 else
4377 {
4378 for (unsigned int i = 0; i < inputCount; ++i)
4379 {
4380 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
4381 inputHandles.emplace_back(std::move(inputHandle));
4382 }
4383 }
telsoa014fcda012018-03-09 14:13:49 +00004384
4385 for (unsigned int i = 0; i < inputCount; ++i)
4386 {
surmeh013537c2c2018-05-18 16:31:43 +01004387 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00004388 }
4389
4390 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
4391
Jim Flynn4ed6c832019-05-20 11:02:46 +01004392 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00004393
4394 for (auto& inputHandle : inputHandles)
4395 {
4396 inputHandle->Allocate();
4397 }
4398
4399 outputHandle->Allocate();
4400
4401 unsigned int nextInputId = 0;
4402 for (auto& inputHandle : inputHandles)
4403 {
surmeh013537c2c2018-05-18 16:31:43 +01004404 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
4405 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00004406 }
4407
Derek Lambertif30f7d32019-04-09 10:25:02 +01004408 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00004409 workload->Execute();
4410
surmeh013537c2c2018-05-18 16:31:43 +01004411 if (needPermuteForConcat)
4412 {
4413 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004414 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01004415 outputTensorInfo,
4416 permuteVector,
4417 std::move(outputHandle),
4418 output);
4419 }
4420 else
4421 {
4422 CopyDataFromITensorHandle(output, outputHandle.get());
4423 }
telsoa014fcda012018-03-09 14:13:49 +00004424}
4425
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004426template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004427LayerTestResult<T, 1> Concatenation1dTestImpl(
4428 armnn::IWorkloadFactory& workloadFactory,
4429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4430 float qScale,
4431 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004432{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004433 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004434
4435 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
4436 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
4437 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
4438
Jim Flynncbb66aa2019-05-15 13:03:54 +01004439 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004440
4441 LayerTestResult<T, 1> result(outputTensorInfo);
4442
4443 std::vector<T> output;
4444 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004445 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004446 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4447 { input0.data(), input1.data(), input2.data() },
4448 outputTensorInfo,
4449 output.data(),
4450 0,
4451 true);
telsoa014fcda012018-03-09 14:13:49 +00004452
4453 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
4454 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4455 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
4456 }));
4457
4458 return result;
4459}
4460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004461LayerTestResult<float, 1> Concatenation1dTest(
4462 armnn::IWorkloadFactory& workloadFactory,
4463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004464{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004465 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004466}
4467
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004468template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004469LayerTestResult<T, 2> Concatenation2dTestImpl(
4470 armnn::IWorkloadFactory& workloadFactory,
4471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004472 const armnn::TensorInfo& outputTensorInfo,
4473 unsigned int dimension,
4474 const float qScale,
4475 const int32_t qOffset)
4476{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004477 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004478
4479 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4480 // Batch 0
4481 1.0f, 2.0f, 3.0f,
4482
4483 // Batch 1
4484 10.0f, 11.0f, 12.0f,
4485 }));
4486
4487 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4488 // Batch 0
4489 4.0f, 5.0f, 6.0f,
4490
4491 // Batch 1
4492 13.0f, 14.0f, 15.0f,
4493 }));
4494
4495 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4496 // Batch 0
4497 7.0f, 8.0f, 9.0f,
4498
4499 // Batch 1
4500 16.0f, 17.0f, 18.0f,
4501 }));
4502
4503 LayerTestResult<T, 2> result(outputTensorInfo);
4504
4505 std::vector<T> output;
4506 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004507 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004508 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4509 { input0.data(), input1.data(), input2.data() },
4510 outputTensorInfo,
4511 output.data(),
4512 dimension,
4513 true);
telsoa014fcda012018-03-09 14:13:49 +00004514
4515 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4516 return result;
4517}
4518
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004519template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004520LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
4521 armnn::IWorkloadFactory& workloadFactory,
4522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4523 float qScale,
4524 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004525{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004526 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004528 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4529 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
4530
telsoa014fcda012018-03-09 14:13:49 +00004531 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4532 // Batch 0
4533 1.0f, 2.0f, 3.0f,
4534
4535 // Batch 1
4536 10.0f, 11.0f, 12.0f,
4537
4538 // Batch 2
4539 4.0f, 5.0f, 6.0f,
4540
4541 // Batch 3
4542 13.0f, 14.0f, 15.0f,
4543
4544 // Batch 4
4545 7.0f, 8.0f, 9.0f,
4546
4547 // Batch 5
4548 16.0f, 17.0f, 18.0f,
4549 }));
4550
4551 return result;
4552}
4553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004554LayerTestResult<float, 2> Concatenation2dDim0Test(
4555 armnn::IWorkloadFactory& workloadFactory,
4556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004557{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004558 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004559}
4560
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004561template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004562LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
4563 armnn::IWorkloadFactory& workloadFactory,
4564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4565 float qScale,
4566 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004567{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004568 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004570 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
4571 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
4572
telsoa014fcda012018-03-09 14:13:49 +00004573 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4574 // Batch 0
4575 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4576
4577 // Batch 1
4578 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
4579 }));
4580
4581 return result;
4582}
4583
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004584LayerTestResult<float, 2> Concatenation2dDim1Test(
4585 armnn::IWorkloadFactory& workloadFactory,
4586 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004587{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004588 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004589}
4590
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004591template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004592LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
4593 armnn::IWorkloadFactory& workloadFactory,
4594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4595 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004596 int32_t qOffset)
4597{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004598 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004599 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4600 // Batch 0
4601 1.0f, 2.0f, 3.0f,
4602
4603 // Batch 1
4604 10.0f, 11.0f, 12.0f,
4605 }));
4606
Jim Flynncbb66aa2019-05-15 13:03:54 +01004607 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004608 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4609 // Batch 0
4610 4.0f, 5.0f, 6.0f,
4611
4612 // Batch 1
4613 13.0f, 14.0f, 15.0f,
4614
4615 // Batch 0
4616 7.0f, 8.0f, 9.0f,
4617 }));
4618
Jim Flynncbb66aa2019-05-15 13:03:54 +01004619 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004620 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4621 // Batch 1
4622 16.0f, 17.0f, 18.0f,
4623 }));
4624
Jim Flynncbb66aa2019-05-15 13:03:54 +01004625 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004626 LayerTestResult<T, 2> result(outputTensorInfo);
4627
4628 std::vector<T> output;
4629 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004630 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004631 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4632 { input0.data(), input1.data(), input2.data() },
4633 outputTensorInfo,
4634 output.data(),
4635 0,
4636 true);
telsoa014fcda012018-03-09 14:13:49 +00004637
4638 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4639 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4640 // Batch 0
4641 1.0f, 2.0f, 3.0f,
4642
4643 // Batch 1
4644 10.0f, 11.0f, 12.0f,
4645
4646 // Batch 2
4647 4.0f, 5.0f, 6.0f,
4648
4649 // Batch 3
4650 13.0f, 14.0f, 15.0f,
4651
4652 // Batch 4
4653 7.0f, 8.0f, 9.0f,
4654
4655 // Batch 5
4656 16.0f, 17.0f, 18.0f,
4657 }));
4658
4659 return result;
4660}
4661
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004662LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
4663 armnn::IWorkloadFactory& workloadFactory,
4664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004665{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004666 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4667 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004668}
4669
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004670template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004671LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
4672 armnn::IWorkloadFactory& workloadFactory,
4673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4674 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004675 int32_t qOffset)
4676{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004677 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004678 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4679 // Batch 0
4680 1.0f, 2.0f, 3.0f,
4681
4682 // Batch 1
4683 10.0f, 11.0f, 12.0f,
4684 }));
4685
Jim Flynncbb66aa2019-05-15 13:03:54 +01004686 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004687 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4688 // Batch 0
4689 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
4690
4691 // Batch 1
4692 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
4693 }));
4694
Jim Flynncbb66aa2019-05-15 13:03:54 +01004695 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004696 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4697 // Batch 0
4698 9.0f,
4699
4700 // Batch 1
4701 18.0f
4702 }));
4703
Jim Flynncbb66aa2019-05-15 13:03:54 +01004704 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004705 LayerTestResult<T, 2> result(outputTensorInfo);
4706
4707 std::vector<T> output;
4708 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004709 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004710 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4711 { input0.data(), input1.data(), input2.data() },
4712 outputTensorInfo,
4713 output.data(),
4714 1,
4715 true);
telsoa014fcda012018-03-09 14:13:49 +00004716
4717 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
4718 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4719 // Batch 0
4720 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
4721
4722 // Batch 1
4723 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
4724 }));
4725
4726 return result;
4727}
4728
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004729LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
4730 armnn::IWorkloadFactory& workloadFactory,
4731 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004732{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004733 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4734 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004735}
4736
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004737template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004738LayerTestResult<T, 3> Concatenation3dTestImpl(
4739 armnn::IWorkloadFactory& workloadFactory,
4740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00004741 const armnn::TensorInfo& outputTensorInfo,
4742 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00004743 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00004744 float qScale,
4745 int32_t qOffset)
4746{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004747 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004748
4749 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4750 // Batch 0, Channel 0
4751 1.0f, 2.0f,
4752
4753 // Batch 0, Channel 1
4754 3.0f, 4.0f,
4755
4756 // Batch 0, Channel 2
4757 5.0f, 6.0f,
4758
4759 // Batch 1, Channel 0
4760 19.0f, 20.0f,
4761
4762 // Batch 1, Channel 1
4763 21.0f, 22.0f,
4764
4765 // Batch 1, Channel 2
4766 23.0f, 24.0f
4767 }));
4768
4769 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4770 // Batch 0, Channel 0
4771 7.0f, 8.0f,
4772
4773 // Batch 0, Channel 1
4774 9.0f, 10.0f,
4775
4776 // Batch 0, Channel 2
4777 11.0f, 12.0f,
4778
4779 // Batch 1, Channel 0
4780 25.0f, 26.0f,
4781
4782 // Batch 1, Channel 1
4783 27.0f, 28.0f,
4784
4785 // Batch 1, Channel 2
4786 29.0f, 30.0f
4787 }));
4788
4789 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4790 // Batch 0, Channel 0
4791 13.0f, 14.0f,
4792
4793 // Batch 0, Channel 1
4794 15.0f, 16.0f,
4795
4796 // Batch 0, Channel 2
4797 17.0f, 18.0f,
4798
4799 // Batch 1, Channel 0
4800 31.0f, 32.0f,
4801
4802 // Batch 1, Channel 1
4803 33.0f, 34.0f,
4804
4805 // Batch 1, Channel 2
4806 35.0f, 36.0f
4807 }));
4808
4809 LayerTestResult<T, 3> result(outputTensorInfo);
4810
4811 std::vector<T> output;
4812 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004813 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004814 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4815 { input0.data(), input1.data(), input2.data() },
4816 outputTensorInfo,
4817 output.data(),
4818 dimension,
4819 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004820
4821 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4822 return result;
4823}
4824
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004825template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004826LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
4827 armnn::IWorkloadFactory& workloadFactory,
4828 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4829 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004830 int32_t qOffset)
4831{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004832 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004833
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004834 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4835 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4836
telsoa014fcda012018-03-09 14:13:49 +00004837 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4838 // Batch 0, Channel 0
4839 1.0f, 2.0f,
4840
4841 // Batch 0, Channel 1
4842 3.0f, 4.0f,
4843
4844 // Batch 0, Channel 2
4845 5.0f, 6.0f,
4846
4847 // Batch 1, Channel 0
4848 19.0f, 20.0f,
4849
4850 // Batch 1, Channel 1
4851 21.0f, 22.0f,
4852
4853 // Batch 1, Channel 2
4854 23.0f, 24.0f,
4855
4856 // Batch 2, Channel 0
4857 7.0f, 8.0f,
4858
4859 // Batch 2, Channel 1
4860 9.0f, 10.0f,
4861
4862 // Batch 2, Channel 2
4863 11.0f, 12.0f,
4864
4865 // Batch 3, Channel 0
4866 25.0f, 26.0f,
4867
4868 // Batch 3, Channel 1
4869 27.0f, 28.0f,
4870
4871 // Batch 3, Channel 2
4872 29.0f, 30.0f,
4873
4874 // Batch 4, Channel 0
4875 13.0f, 14.0f,
4876
4877 // Batch 4, Channel 1
4878 15.0f, 16.0f,
4879
4880 // Batch 4, Channel 2
4881 17.0f, 18.0f,
4882
4883 // Batch 5, Channel 0
4884 31.0f, 32.0f,
4885
4886 // Batch 5, Channel 1
4887 33.0f, 34.0f,
4888
4889 // Batch 5, Channel 2
4890 35.0f, 36.0f
4891 }));
narpra015cdda352018-11-19 15:30:27 +00004892
telsoa014fcda012018-03-09 14:13:49 +00004893 return result;
4894}
4895
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004896LayerTestResult<float, 3> Concatenation3dDim0Test(
4897 armnn::IWorkloadFactory& workloadFactory,
4898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004899{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004900 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004901}
4902
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004903template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004904LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
4905 armnn::IWorkloadFactory& workloadFactory,
4906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4907 float qScale,
4908 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004909{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004910 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004911
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004912 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4913 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004914
telsoa014fcda012018-03-09 14:13:49 +00004915 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4916 // Batch 0, Channel 0
4917 1.0f, 2.0f,
4918
4919 // Batch 0, Channel 1
4920 3.0f, 4.0f,
4921
4922 // Batch 0, Channel 2
4923 5.0f, 6.0f,
4924
4925 // Batch 0, Channel 3
4926 7.0f, 8.0f,
4927
4928 // Batch 0, Channel 4
4929 9.0f, 10.0f,
4930
4931 // Batch 0, Channel 5
4932 11.0f, 12.0f,
4933
4934 // Batch 0, Channel 6
4935 13.0f, 14.0f,
4936
4937 // Batch 0, Channel 7
4938 15.0f, 16.0f,
4939
4940 // Batch 0, Channel 8
4941 17.0f, 18.0f,
4942
4943 // Batch 1, Channel 0
4944 19.0f, 20.0f,
4945
4946 // Batch 1, Channel 1
4947 21.0f, 22.0f,
4948
4949 // Batch 1, Channel 2
4950 23.0f, 24.0f,
4951
4952 // Batch 1, Channel 3
4953 25.0f, 26.0f,
4954
4955 // Batch 1, Channel 4
4956 27.0f, 28.0f,
4957
4958 // Batch 1, Channel 5
4959 29.0f, 30.0f,
4960
4961 // Batch 1, Channel 6
4962 31.0f, 32.0f,
4963
4964 // Batch 1, Channel 7
4965 33.0f, 34.0f,
4966
4967 // Batch 1, Channel 8
4968 35.0f, 36.0f
4969 }));
4970
4971 return result;
4972}
4973
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004974LayerTestResult<float, 3> Concatenation3dDim1Test(
4975 armnn::IWorkloadFactory& workloadFactory,
4976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004977{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004978 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004979}
4980
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004981template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004982LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
4983 armnn::IWorkloadFactory& workloadFactory,
4984 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004985 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004986 float qScale,
4987 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004988{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004989 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004990
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004991 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4992 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004993
telsoa014fcda012018-03-09 14:13:49 +00004994 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4995 // Batch 0, Channel 0
4996 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
4997
4998 // Batch 0, Channel 1
4999 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
5000
5001 // Batch 0, Channel 2
5002 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
5003
5004 // Batch 1, Channel 0
5005 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
5006
5007 // Batch 1, Channel 1
5008 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
5009
5010 // Batch 1, Channel 2
5011 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
5012 }));
5013
5014 return result;
5015}
5016
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005017LayerTestResult<float, 3> Concatenation3dDim2Test(
5018 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5020 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00005021{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005022 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
5023 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005024}
5025
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005026template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005027LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
5028 armnn::IWorkloadFactory& workloadFactory,
5029 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5030 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005031 int32_t qOffset)
5032{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005033 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005034 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5035 // Batch 0, Channel 0
5036 1.0f, 2.0f,
5037
5038 // Batch 0, Channel 1
5039 3.0f, 4.0f,
5040
5041 // Batch 0, Channel 2
5042 5.0f, 6.0f,
5043
5044 // Batch 1, Channel 0
5045 19.0f, 20.0f,
5046
5047 // Batch 1, Channel 1
5048 21.0f, 22.0f,
5049
5050 // Batch 1, Channel 2
5051 23.0f, 24.0f
5052 }));
5053
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005054 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005055 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5056 // Batch 0, Channel 0
5057 7.0f, 8.0f,
5058
5059 // Batch 0, Channel 1
5060 9.0f, 10.0f,
5061
5062 // Batch 0, Channel 2
5063 11.0f, 12.0f,
5064 }));
5065
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005066 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005067 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5068 // Batch 0, Channel 0
5069 25.0f, 26.0f,
5070
5071 // Batch 0, Channel 1
5072 27.0f, 28.0f,
5073
5074 // Batch 0, Channel 2
5075 29.0f, 30.0f,
5076
5077 // Batch 1, Channel 0
5078 13.0f, 14.0f,
5079
5080 // Batch 1, Channel 1
5081 15.0f, 16.0f,
5082
5083 // Batch 1, Channel 2
5084 17.0f, 18.0f,
5085
5086 // Batch 2, Channel 0
5087 31.0f, 32.0f,
5088
5089 // Batch 2, Channel 1
5090 33.0f, 34.0f,
5091
5092 // Batch 2, Channel 2
5093 35.0f, 36.0f
5094 }));
5095
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005096 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00005097 LayerTestResult<T, 3> result(outputTensorInfo);
5098
5099 std::vector<T> output;
5100 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005101 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005102 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5103 { input0.data(), input1.data(), input2.data() },
5104 outputTensorInfo,
5105 output.data(),
5106 0,
5107 true);
telsoa014fcda012018-03-09 14:13:49 +00005108
5109 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5110 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5111 // Batch 0, Channel 0
5112 1.0f, 2.0f,
5113
5114 // Batch 0, Channel 1
5115 3.0f, 4.0f,
5116
5117 // Batch 0, Channel 2
5118 5.0f, 6.0f,
5119
5120 // Batch 1, Channel 0
5121 19.0f, 20.0f,
5122
5123 // Batch 1, Channel 1
5124 21.0f, 22.0f,
5125
5126 // Batch 1, Channel 2
5127 23.0f, 24.0f,
5128
5129 // Batch 2, Channel 0
5130 7.0f, 8.0f,
5131
5132 // Batch 2, Channel 1
5133 9.0f, 10.0f,
5134
5135 // Batch 2, Channel 2
5136 11.0f, 12.0f,
5137
5138 // Batch 3, Channel 0
5139 25.0f, 26.0f,
5140
5141 // Batch 3, Channel 1
5142 27.0f, 28.0f,
5143
5144 // Batch 3, Channel 2
5145 29.0f, 30.0f,
5146
5147 // Batch 4, Channel 0
5148 13.0f, 14.0f,
5149
5150 // Batch 4, Channel 1
5151 15.0f, 16.0f,
5152
5153 // Batch 4, Channel 2
5154 17.0f, 18.0f,
5155
5156 // Batch 5, Channel 0
5157 31.0f, 32.0f,
5158
5159 // Batch 5, Channel 1
5160 33.0f, 34.0f,
5161
5162 // Batch 5, Channel 2
5163 35.0f, 36.0f
5164 }));
5165
5166 return result;
5167}
5168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005169LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
5170 armnn::IWorkloadFactory& workloadFactory,
5171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005172{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005173 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
5174 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005175}
5176
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005177template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005178LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
5179 armnn::IWorkloadFactory& workloadFactory,
5180 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5181 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00005182 int32_t qOffset)
5183{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005184 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005185 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5186 // Batch 0, Channel 0
5187 1.0f, 2.0f,
5188
5189 // Batch 0, Channel 1
5190 3.0f, 4.0f,
5191
5192 // Batch 0, Channel 2
5193 5.0f, 6.0f,
5194
5195 // Batch 1, Channel 0
5196 19.0f, 20.0f,
5197
5198 // Batch 1, Channel 1
5199 21.0f, 22.0f,
5200
5201 // Batch 1, Channel 2
5202 23.0f, 24.0f
5203 }));
5204
Jim Flynncbb66aa2019-05-15 13:03:54 +01005205 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005206 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5207 // Batch 0, Channel 0
5208 7.0f, 8.0f,
5209
5210 // Batch 0, Channel 1
5211 9.0f, 10.0f,
5212
5213 // Batch 0, Channel 2
5214 11.0f, 12.0f,
5215
5216 // Batch 0, Channel 3
5217 25.0f, 26.0f,
5218
5219 // Batch 1, Channel 0
5220 27.0f, 28.0f,
5221
5222 // Batch 1, Channel 1
5223 29.0f, 30.0f,
5224
5225 // Batch 1, Channel 2
5226 13.0f, 14.0f,
5227
5228 // Batch 1, Channel 3
5229 15.0f, 16.0f,
5230 }));
5231
Jim Flynncbb66aa2019-05-15 13:03:54 +01005232 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005233 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
5234 // Batch 0, Channel 0
5235 17.0f, 18.0f,
5236
5237 // Batch 1, Channel 0
5238 31.0f, 32.0f,
5239 }));
5240
Jim Flynncbb66aa2019-05-15 13:03:54 +01005241 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00005242 LayerTestResult<T, 3> result(outputTensorInfo);
5243
5244 std::vector<T> output;
5245 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005246 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00005247 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
5248 { input0.data(), input1.data(), input2.data() },
5249 outputTensorInfo,
5250 output.data(),
5251 1,
5252 true);
telsoa014fcda012018-03-09 14:13:49 +00005253
5254 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
5255 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5256 // Batch 0, Channel 0
5257 1.0f, 2.0f,
5258
5259 // Batch 0, Channel 1
5260 3.0f, 4.0f,
5261
5262 // Batch 0, Channel 2
5263 5.0f, 6.0f,
5264
5265 // Batch 0, Channel 3
5266 7.0f, 8.0f,
5267
5268 // Batch 0, Channel 4
5269 9.0f, 10.0f,
5270
5271 // Batch 0, Channel 5
5272 11.0f, 12.0f,
5273
5274 // Batch 0, Channel 6
5275 25.0f, 26.0f,
5276
5277 // Batch 0, Channel 7
5278 17.0f, 18.0f,
5279
5280 // Batch 1, Channel 0
5281 19.0f, 20.0f,
5282
5283 // Batch 1, Channel 1
5284 21.0f, 22.0f,
5285
5286 // Batch 1, Channel 2
5287 23.0f, 24.0f,
5288
5289 // Batch 1, Channel 3
5290 27.0f, 28.0f,
5291
5292 // Batch 1, Channel 4
5293 29.0f, 30.0f,
5294
5295 // Batch 1, Channel 5
5296 13.0f, 14.0f,
5297
5298 // Batch 1, Channel 6
5299 15.0f, 16.0f,
5300
5301 // Batch 1, Channel 7
5302 31.0f, 32.0f,
5303 }));
5304
5305 return result;
5306}
5307
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005308LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
5309 armnn::IWorkloadFactory& workloadFactory,
5310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005311{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005312 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
5313 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005314}
5315
// Concatenates three 3D tensors that differ only in their dim-2 (width)
// extent -- { 2, 3, 2 }, { 2, 3, 1 } and { 2, 3, 3 } -- along dimension 2
// and checks the result against a hand-computed { 2, 3, 6 } reference.
// useSubtensor selects the subtensor-view path vs. the copy path inside
// Concatenate; qScale/qOffset are forwarded to the tensor infos so the same
// data tables serve both float and quantized instantiations.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    // Output width is the sum of the input widths: 2 + 1 + 3 = 6.
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Concatenate along dimension 2 (the last argument before useSubtensor).
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    // Each output row is input0's row, then input1's, then input2's.
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
5423
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005424LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
5425 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00005426 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5427 bool useSubtensor)
5428{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005429 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
5430 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005431}
5432
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005433template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005434LayerTestResult<T, 4> Concatenation4dTestImpl(
5435 armnn::IWorkloadFactory& workloadFactory,
5436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5437 const armnn::TensorInfo& outputTensorInfo,
5438 unsigned int dimension,
5439 bool useSubtensor,
5440 float qScale,
5441 int32_t qOffset)
5442{
Jim Flynncbb66aa2019-05-15 13:03:54 +01005443 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005444
5445 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5446 1.0f, 2.0f,
5447 3.0f, 4.0f,
5448 5.0f, 6.0f,
5449 7.0f, 8.0f,
5450 9.0f, 10.0f,
5451 11.0f, 12.0f
5452 }));
5453
5454 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5455 11.0f, 12.0f,
5456 13.0f, 14.0f,
5457 15.0f, 16.0f,
5458 17.0f, 18.0f,
5459 19.0f, 20.0f,
5460 21.0f, 22.0f
5461 }));
5462
5463 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5464 21.0f, 22.0f,
5465 23.0f, 24.0f,
5466 25.0f, 26.0f,
5467 27.0f, 28.0f,
5468 29.0f, 30.0f,
5469 31.0f, 32.0f
5470 }));
5471
5472 LayerTestResult<T, 4> result(outputTensorInfo);
5473
5474 std::vector<T> output;
5475 output.resize(outputTensorInfo.GetNumElements());
5476
5477 Concatenate<T>(workloadFactory,
5478 memoryManager,
5479 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
5480 {input0.data(), input1.data(), input2.data()},
5481 outputTensorInfo,
5482 output.data(),
5483 dimension,
5484 useSubtensor);
5485
5486 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5487 return result;
5488}
5489
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 0 (batch),
// producing { 3, 3, 2, 2 }: the three inputs laid out back-to-back.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    // Dimension 0, subtensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    // Expected output: input0, then input1, then input2 (see Concatenation4dTestImpl).
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
5526
5527LayerTestResult<float, 4> Concatenation4dDim0Test(
5528 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005529 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005530{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005531 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005532}
5533
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 1 (channels),
// producing { 1, 9, 2, 2 }. With a single batch, the data layout is the same
// as batch concatenation, so the expected table mirrors the dim-0 case.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    // Dimension 1, subtensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5571
5572LayerTestResult<float, 4> Concatenation4dDim1Test(
5573 armnn::IWorkloadFactory& workloadFactory,
5574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5575{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005576 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005577}
5578
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 2 (height),
// producing { 1, 3, 6, 2 }: within each channel, the rows of input0, input1
// and input2 are stacked.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    // Dimension 2, subtensor path enabled.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5616
5617LayerTestResult<float, 4> Concatenation4dDim2Test(
5618 armnn::IWorkloadFactory& workloadFactory,
5619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5620{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005621 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005622}
5623
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 3 (width),
// producing { 1, 3, 2, 6 }: each output row interleaves one row from each
// input. useSubtensor is caller-controlled here because dim-3 subtensor
// support varies by backend.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
5662
5663LayerTestResult<float, 4> Concatenation4dDim3Test(
5664 armnn::IWorkloadFactory& workloadFactory,
5665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5666 bool useSubtensor)
5667{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005668 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
5669 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00005670}
5671
// Concatenates two tensors with different batch counts -- { 1, 3, 2, 2 } and
// { 2, 3, 2, 2 } -- along dimension 0, producing { 3, 3, 2, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    // Output batch count is 1 + 2 = 3.
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Subtensor path enabled (last argument).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's single batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
5751
5752LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
5753 armnn::IWorkloadFactory& workloadFactory,
5754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5755{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005756 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
5757 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005758}
5759
// Concatenates two tensors with different channel counts -- { 1, 3, 2, 2 }
// and { 1, 2, 2, 2 } -- along dimension 1, producing { 1, 5, 2, 2 }.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channel count is 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Subtensor path enabled (last argument).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's three channels followed by input1's two.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5820
5821LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5822 armnn::IWorkloadFactory& workloadFactory,
5823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5824{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005825 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5826 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005827}
5828
// Concatenates two tensors with different heights -- { 1, 3, 2, 2 } and
// { 1, 3, 3, 2 } -- along dimension 2, producing { 1, 3, 5, 2 }: per
// channel, input0's two rows are followed by input1's three rows.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // Subtensor path enabled (last argument).
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5900
5901LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5902 armnn::IWorkloadFactory& workloadFactory,
5903 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005905 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5906 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005907}
5908
// Concatenates two tensors with different widths -- { 1, 3, 2, 2 } and
// { 1, 3, 2, 3 } -- along dimension 3, producing { 1, 3, 2, 5 }: each output
// row is input0's 2-wide row followed by input1's 3-wide row. useSubtensor
// is caller-controlled because dim-3 subtensor support varies by backend.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output width is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5969
5970LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5971 armnn::IWorkloadFactory& workloadFactory,
5972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5973 bool useSubtensor)
5974{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005975 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5976 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005977}
5978
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005979LayerTestResult<float, 2> FakeQuantizationTest(
5980 armnn::IWorkloadFactory& workloadFactory,
5981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005982{
5983 constexpr unsigned int width = 2;
5984 constexpr unsigned int height = 3;
5985
5986 const armnn::TensorInfo tensorInfo({height, width },
5987 armnn::DataType::Float32);
5988 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5989 -10.0f, -5.0f,
5990 0.0f, 5.0f,
5991 10.0f, 10.0f
5992 }));
5993
5994 LayerTestResult<float, 2> ret(tensorInfo);
5995
5996 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5997
5998 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5999
6000 armnn::FakeQuantizationQueueDescriptor data;
6001 armnn::WorkloadInfo info;
6002
6003 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
6004 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
6005 float min = -10.f;
6006 float max = 10.f;
6007
6008 data.m_Parameters.m_Min = min;
6009 data.m_Parameters.m_Max = max;
6010
6011 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
6012 armnn::FakeQuantizationQueueDescriptor refData = data;
6013 armnn::WorkloadInfo refInfo = info;
6014 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
6015
6016 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
6017
6018 inputHandle->Allocate();
6019 outputHandle->Allocate();
6020
6021 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
6022
Derek Lambertif30f7d32019-04-09 10:25:02 +01006023 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006024 workload->Execute();
6025
6026 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
6027
6028 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
6029 0.0f, 63.0f,
6030 128.0f, 191.0f,
6031 255.0f, 255.0f
6032 }));
6033 return ret;
6034}
6035
namespace
{

// Runs an L2Normalization workload over a 4D tensor and compares against
// caller-supplied expected values. Input and expected data are authored in
// NCHW order; when layout is NHWC they are permuted in place first so the
// same tables serve both layouts. Separate in/out quantization parameters
// allow asymmetric quantized test cases; epsilon is forwarded to the
// workload's m_Eps parameter.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2NormalizationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    float scale,
    int32_t offset,
    const std::vector<float>& inputValues,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& expectedOutputValues,
    const armnn::DataLayout layout,
    float epsilon = 1e-12f)
{
    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);

    // at this point if we require it permute the input data
    // NOTE(review): vector named NCHWToNHWC -- confirm the mapping direction
    // matches armnnUtils::Permute's convention before reusing it elsewhere.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    std::vector<float> inputData = inputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    // Quantize the (possibly permuted) float data with the input parameters.
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
                                        inputTensorInfo.GetQuantizationScale(),
                                        inputTensorInfo.GetQuantizationOffset(),
                                        inputData));

    // Permute the expected data the same way so it matches the output layout.
    std::vector<float> expectedOutputData = expectedOutputValues;
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(expectedOutputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
                            sizeof(float));
        expectedOutputData = tmp;
    }

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
                                             outputTensorInfo.GetQuantizationScale(),
                                             outputTensorInfo.GetQuantizationOffset(),
                                             expectedOutputData));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::L2NormalizationQueueDescriptor descriptor;
    descriptor.m_Parameters.m_Eps = epsilon;
    descriptor.m_Parameters.m_DataLayout = layout;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

// Returns 1 / ||elements||_2, i.e. the reciprocal of the Euclidean norm,
// used to build expected L2Normalization outputs by hand.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    return 1.0f / sqrtf(reduction);
}

} // anonymous namespace
6119
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006120template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006121LayerTestResult<T, 2> Pad2dTestCommon(
6122 armnn::IWorkloadFactory& workloadFactory,
6123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6124 float qScale,
David Monahan34757812019-06-19 11:47:21 +01006125 int32_t qOffset,
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006126 const float customPaddingValue)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006127{
Derek Lambertif30f7d32019-04-09 10:25:02 +01006128 const armnn::TensorShape inputShape{ 3, 3 };
6129 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006130
David Monahan34757812019-06-19 11:47:21 +01006131 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6132 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006133
Derek Lambertif30f7d32019-04-09 10:25:02 +01006134 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006135 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006136 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006137 // Height (3) x Width (3)
6138 4, 8, 6,
6139 7, 4, 4,
6140 3, 2, 4
6141 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006142
Teresa Charlinec8e1982019-07-02 16:24:09 +01006143 auto p = customPaddingValue;
David Monahan34757812019-06-19 11:47:21 +01006144 std::vector<T> expectedOutputValues;
Teresa Charlinec8e1982019-07-02 16:24:09 +01006145 expectedOutputValues = (
6146 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006147 {
Teresa Charlinec8e1982019-07-02 16:24:09 +01006148 p, p, p, p, p, p, p,
6149 p, p, p, p, p, p, p,
6150 p, p, 4, 8, 6, p, p,
6151 p, p, 7, 4, 4, p, p,
6152 p, p, 3, 2, 4, p, p,
6153 p, p, p, p, p, p, p,
6154 p, p, p, p, p, p, p
6155 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006156
Derek Lambertif30f7d32019-04-09 10:25:02 +01006157 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006158
Derek Lambertif30f7d32019-04-09 10:25:02 +01006159 LayerTestResult<T, 2> result(outputTensorInfo);
6160 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006161
Derek Lambertif30f7d32019-04-09 10:25:02 +01006162 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6163 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006164
Derek Lambertif30f7d32019-04-09 10:25:02 +01006165 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006166
Teresa Charlinec8e1982019-07-02 16:24:09 +01006167 std::vector<std::pair<unsigned int, unsigned int>> padList;
6168 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
6169 padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006170
Teresa Charlinec8e1982019-07-02 16:24:09 +01006171 descriptor.m_Parameters.m_PadList = padList;
6172 descriptor.m_Parameters.m_PadValue = customPaddingValue;
Derek Lambertif30f7d32019-04-09 10:25:02 +01006173 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006174
Derek Lambertif30f7d32019-04-09 10:25:02 +01006175 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6176 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006177
Derek Lambertif30f7d32019-04-09 10:25:02 +01006178 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006179
Derek Lambertif30f7d32019-04-09 10:25:02 +01006180 inputHandle->Allocate();
6181 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006182
Derek Lambertif30f7d32019-04-09 10:25:02 +01006183 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006184
Derek Lambertif30f7d32019-04-09 10:25:02 +01006185 workload->PostAllocationConfigure();
6186 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006187
Derek Lambertif30f7d32019-04-09 10:25:02 +01006188 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006189
Derek Lambertif30f7d32019-04-09 10:25:02 +01006190 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006191}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006192
// Pads a 2x2x2 input with zeros to a 3x5x6 output. The PadList below gives the
// (before, after) padding per dimension: (0,1) for channels, (2,1) for height,
// (2,2) for width. No custom pad value is set, so the border is zero-filled.
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    // Input and output share the same quantization parameters.
    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0: input channel 0 offset by 2 rows and 2 columns.
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        // Channel 1: input channel 1 offset by 2 rows and 2 columns.
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        // Channel 2: entirely padding (one channel of "after" padding).
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (pad before, pad after) per dimension: channels, height, width.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006277
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006278template<armnn::DataType ArmnnType, typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006279LayerTestResult<T, 4> Pad4dTestCommon(
6280 armnn::IWorkloadFactory& workloadFactory,
6281 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6282 float qScale,
6283 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006284{
6285 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
6286 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
6287
David Monahan34757812019-06-19 11:47:21 +01006288 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
6289 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006290
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006291 std::vector<T> inputValues(
6292 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006293 {
6294 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006295 0, 1,
6296 2, 3,
6297 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006298
6299 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006300 6, 7,
6301 8, 9,
6302 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006303
6304 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006305 12, 13,
6306 14, 15,
6307 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006308
6309 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006310 18, 19,
6311 20, 21,
6312 22, 23
6313 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006314
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006315 std::vector<T> expectedOutputValues(
6316 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006317 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006318 0, 0, 0, 0,
6319 0, 0, 0, 0,
6320 0, 0, 0, 0,
6321 0, 0, 0, 0,
6322 0, 0, 0, 0,
6323 0, 0, 0, 0,
6324 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006325
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006326 0, 0, 0, 0,
6327 0, 0, 0, 0,
6328 0, 0, 0, 0,
6329 0, 0, 0, 0,
6330 0, 0, 0, 0,
6331 0, 0, 0, 0,
6332 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006333
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006334 0, 0, 0, 0,
6335 0, 0, 0, 0,
6336 0, 0, 0, 0,
6337 0, 0, 0, 0,
6338 0, 0, 0, 0,
6339 0, 0, 0, 0,
6340 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006341
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006342 0, 0, 0, 0,
6343 0, 0, 0, 0,
6344 0, 0, 0, 0,
6345 0, 0, 0, 0,
6346 0, 0, 0, 0,
6347 0, 0, 0, 0,
6348 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006349
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006350 0, 0, 0, 0,
6351 0, 0, 0, 0,
6352 0, 0, 0, 0,
6353 0, 0, 0, 0,
6354 0, 0, 0, 0,
6355 0, 0, 0, 0,
6356 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006357
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006358 0, 0, 0, 0,
6359 0, 0, 0, 0,
6360 0, 0, 0, 0,
6361 0, 0, 0, 0,
6362 0, 0, 0, 0,
6363 0, 0, 0, 0,
6364 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006365
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006366 0, 0, 0, 0,
6367 0, 0, 0, 0,
6368 0, 0, 0, 0,
6369 0, 0, 0, 0,
6370 0, 0, 0, 0,
6371 0, 0, 0, 0,
6372 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006373
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006374 0, 0, 0, 0,
6375 0, 0, 0, 0,
6376 0, 0, 0, 0,
6377 0, 0, 1, 0,
6378 0, 2, 3, 0,
6379 0, 4, 5, 0,
6380 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006381
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006382 0, 0, 0, 0,
6383 0, 0, 0, 0,
6384 0, 0, 0, 0,
6385 0, 6, 7, 0,
6386 0, 8, 9, 0,
6387 0, 10, 11, 0,
6388 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006389
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006390 0, 0, 0, 0,
6391 0, 0, 0, 0,
6392 0, 0, 0, 0,
6393 0, 0, 0, 0,
6394 0, 0, 0, 0,
6395 0, 0, 0, 0,
6396 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006397
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006398 0, 0, 0, 0,
6399 0, 0, 0, 0,
6400 0, 0, 0, 0,
6401 0, 0, 0, 0,
6402 0, 0, 0, 0,
6403 0, 0, 0, 0,
6404 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006405
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006406 0, 0, 0, 0,
6407 0, 0, 0, 0,
6408 0, 0, 0, 0,
6409 0, 0, 0, 0,
6410 0, 0, 0, 0,
6411 0, 0, 0, 0,
6412 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006413
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006414 0, 0, 0, 0,
6415 0, 0, 0, 0,
6416 0, 0, 0, 0,
6417 0, 12, 13, 0,
6418 0, 14, 15, 0,
6419 0, 16, 17, 0,
6420 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006421
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006422 0, 0, 0, 0,
6423 0, 0, 0, 0,
6424 0, 0, 0, 0,
6425 0, 18, 19, 0,
6426 0, 20, 21, 0,
6427 0, 22, 23, 0,
6428 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006429
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006430 0, 0, 0, 0,
6431 0, 0, 0, 0,
6432 0, 0, 0, 0,
6433 0, 0, 0, 0,
6434 0, 0, 0, 0,
6435 0, 0, 0, 0,
6436 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006437
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006438 0, 0, 0, 0,
6439 0, 0, 0, 0,
6440 0, 0, 0, 0,
6441 0, 0, 0, 0,
6442 0, 0, 0, 0,
6443 0, 0, 0, 0,
6444 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006445
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006446 0, 0, 0, 0,
6447 0, 0, 0, 0,
6448 0, 0, 0, 0,
6449 0, 0, 0, 0,
6450 0, 0, 0, 0,
6451 0, 0, 0, 0,
6452 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006453
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006454 0, 0, 0, 0,
6455 0, 0, 0, 0,
6456 0, 0, 0, 0,
6457 0, 0, 0, 0,
6458 0, 0, 0, 0,
6459 0, 0, 0, 0,
6460 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006461
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006462 0, 0, 0, 0,
6463 0, 0, 0, 0,
6464 0, 0, 0, 0,
6465 0, 0, 0, 0,
6466 0, 0, 0, 0,
6467 0, 0, 0, 0,
6468 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006469
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006470 0, 0, 0, 0,
6471 0, 0, 0, 0,
6472 0, 0, 0, 0,
6473 0, 0, 0, 0,
6474 0, 0, 0, 0,
6475 0, 0, 0, 0,
6476 0, 0, 0, 0
6477 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006478
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006479 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006480
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006481 LayerTestResult<T, 4> result(outputTensorInfo);
6482 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006483
6484 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6485 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6486
6487 armnn::PadQueueDescriptor descriptor;
6488
6489 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6490 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6491 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6492 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6493 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6494
6495 descriptor.m_Parameters.m_PadList = PadList;
6496 armnn::WorkloadInfo info;
6497
6498 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6499 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6500
6501 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6502
6503 inputHandle->Allocate();
6504 outputHandle->Allocate();
6505
6506 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6507
Derek Lambertif30f7d32019-04-09 10:25:02 +01006508 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006509 workload->Execute();
6510
6511 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6512
6513 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006514}
6515
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006516LayerTestResult<uint8_t, 2> PadUint82dTest(
6517 armnn::IWorkloadFactory& workloadFactory,
6518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006519{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006520 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006521}
6522
David Monahan34757812019-06-19 11:47:21 +01006523LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
6524 armnn::IWorkloadFactory& workloadFactory,
6525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6526{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006527 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006528}
6529
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006530LayerTestResult<uint8_t, 3> PadUint83dTest(
6531 armnn::IWorkloadFactory& workloadFactory,
6532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006533{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006534 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006535}
6536
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006537LayerTestResult<uint8_t, 4> PadUint84dTest(
6538 armnn::IWorkloadFactory& workloadFactory,
6539 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006540{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006541 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006542}
6543
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006544
// Explicit instantiations of the pad test templates for QuantisedSymm16, so
// that other translation units can link against these specializations.
template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const float customPaddingValue);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);

template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset);
6566
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006567LayerTestResult<float, 2> PadFloat322dTest(
6568 armnn::IWorkloadFactory& workloadFactory,
6569 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006570{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006571 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006572}
6573
David Monahan34757812019-06-19 11:47:21 +01006574LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
6575 armnn::IWorkloadFactory& workloadFactory,
6576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6577{
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +01006578 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
David Monahan34757812019-06-19 11:47:21 +01006579}
6580
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006581LayerTestResult<float, 3> PadFloat323dTest(
6582 armnn::IWorkloadFactory& workloadFactory,
6583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006584{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006585 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006586}
6587
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006588LayerTestResult<float, 4> PadFloat324dTest(
6589 armnn::IWorkloadFactory& workloadFactory,
6590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006591{
Teresa Charlinec8e1982019-07-02 16:24:09 +01006592 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006593}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006594
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006595template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguere52211e2019-06-17 12:23:52 +01006596LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
6597 armnn::IWorkloadFactory& workloadFactory,
6598 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6599 float scale,
6600 int32_t offset,
6601 float outScale,
6602 int32_t outOffset,
6603 const armnn::DataLayout layout,
6604 float epsilon)
6605{
6606 // Width: 1
6607 // Height: 1
6608 // Channels: 3
6609 // BatchSize: 1
6610 unsigned int numberOfBatches = 1;
6611 unsigned int numberOfChannels = 3;
6612 unsigned int height = 1;
6613 unsigned int width = 1;
6614
6615 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
6616 numberOfBatches, numberOfChannels, height, width, layout);
6617
6618 // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
6619 std::vector<float> inputValues
6620 {
6621 // Batch 0, Channel 0, Height (1) x Width (1)
6622 0.00000001f,
6623
6624 // Batch 0, Channel 1, Height (1) x Width (1)
6625 0.00000002f,
6626
6627 // Batch 0, Channel 2, Height (1) x Width (1)
6628 0.00000003f,
6629 };
6630
6631 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
6632 std::vector<float> expectedOutputValues
6633 {
6634 // Batch 0, Channel 0, Height (1) x Width (1)
6635 0.00000001f * approxInvL2Norm,
6636 0.00000002f * approxInvL2Norm,
6637 0.00000003f * approxInvL2Norm,
6638 };
6639
6640 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6641 inputValues, outScale, outOffset, expectedOutputValues, layout,
6642 epsilon);
6643}
6644
6645
6646template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006647LayerTestResult<T, 4> L2Normalization1dTestCommon(
6648 armnn::IWorkloadFactory& workloadFactory,
6649 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006650 float scale,
6651 int32_t offset,
6652 float outScale,
6653 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006654 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006655{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006656 // Width: 1
6657 // Height: 1
6658 // Channels: 10
6659 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006660 unsigned int numberOfBatches = 1;
6661 unsigned int numberOfChannels = 10;
6662 unsigned int height = 1;
6663 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006664
jimfly013aab7c32018-11-12 13:32:08 +00006665
Nina Drozdd41b2592018-11-19 13:03:36 +00006666 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006667 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006668 std::vector<float> inputValues
6669 {
6670 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006671 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006672
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006673 // Batch 0, Channel 1, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006674 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006675
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006676 // Batch 0, Channel 2, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006677 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006678
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006679 // Batch 0, Channel 3, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006680 4.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006681
6682 // Batch 0, Channel 4, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006683 5.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006684
6685 // Batch 0, Channel 5, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006686 6.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006687
6688 // Batch 0, Channel 6, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006689 7.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006690
6691 // Batch 0, Channel 7, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006692 8.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006693
6694 // Batch 0, Channel 8, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006695 9.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006696
6697 // Batch 0, Channel 9, Height (1) x Width (1)
6698 10.0f
6699 };
telsoa014fcda012018-03-09 14:13:49 +00006700 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006701 std::vector<float> expectedOutputValues
6702 {
6703 // Batch 0, Channel 0, Height (1) x Width (1)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006704 1.0f * approxInvL2Norm,
6705 2.0f * approxInvL2Norm,
6706 3.0f * approxInvL2Norm,
6707 4.0f * approxInvL2Norm,
6708 5.0f * approxInvL2Norm,
6709 6.0f * approxInvL2Norm,
6710 7.0f * approxInvL2Norm,
6711 8.0f * approxInvL2Norm,
6712 9.0f * approxInvL2Norm,
telsoa014fcda012018-03-09 14:13:49 +00006713 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006714 };
telsoa014fcda012018-03-09 14:13:49 +00006715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006716
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006717 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6718 inputValues, outScale, outOffset, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006719}
6720
Ferran Balaguere52211e2019-06-17 12:23:52 +01006721LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
6722 armnn::IWorkloadFactory& workloadFactory,
6723 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6724 const armnn::DataLayout layout)
6725{
6726 // Dummy descriptor to get the default value of epsilon.
6727 armnn::L2NormalizationDescriptor descriptor;
6728
6729 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6730 layout, descriptor.m_Eps);
6731}
6732
6733LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
6734 armnn::IWorkloadFactory& workloadFactory,
6735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6736 const armnn::DataLayout layout)
6737{
6738 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6739 layout, 1e-9f);
6740}
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006741
6742LayerTestResult<float, 4> L2Normalization1dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006743 armnn::IWorkloadFactory& workloadFactory,
6744 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006745 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006746{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006747 return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006748}
6749
6750LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
6751 armnn::IWorkloadFactory& workloadFactory,
6752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6753 const armnn::DataLayout layout)
6754{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006755 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006756 layout);
6757}
6758
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006759LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
6760 armnn::IWorkloadFactory& workloadFactory,
6761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6762 const armnn::DataLayout layout)
6763{
6764 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6765 1.f/128, 128, layout);
6766}
6767
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006768template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6769LayerTestResult<T, 4> L2Normalization2dTestCommon(
6770 armnn::IWorkloadFactory& workloadFactory,
6771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006772 float scale,
6773 int32_t offset,
6774 float outScale,
6775 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006776 const armnn::DataLayout layout)
6777{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006778 // Width: 5
6779 // Height: 1
6780 // Channels: 2
6781 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006782 unsigned int numberOfBatches = 1;
6783 unsigned int numberOfChannels = 2;
6784 unsigned int height = 1;
6785 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006786
Nina Drozdd41b2592018-11-19 13:03:36 +00006787 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006788 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006789 std::vector<float> inputValues
6790 {
6791 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006792 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006793
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006794 // Batch 0, Channel 1, Height (1) x Width (5)
6795 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6796 };
6797 std::vector<float> expectedOutputValues
6798 {
6799 // Batch 0, Channel 0, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006800 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6801 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6802 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6803 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
6804 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006805
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006806 // Batch 0, Channel 1, Height (1) x Width (5)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006807 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6808 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6809 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6810 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006811 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006812 };
telsoa014fcda012018-03-09 14:13:49 +00006813
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006814 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6815 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006816}
telsoa014fcda012018-03-09 14:13:49 +00006817
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006818LayerTestResult<float, 4> L2Normalization2dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006819 armnn::IWorkloadFactory& workloadFactory,
6820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006821 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006822{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006823 return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6824 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006825}
6826
6827LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
6828 armnn::IWorkloadFactory& workloadFactory,
6829 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6830 const armnn::DataLayout layout)
6831{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006832 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006833 layout);
6834}
6835
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006836LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
6837 armnn::IWorkloadFactory& workloadFactory,
6838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6839 const armnn::DataLayout layout)
6840{
6841 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6842 1.f/128, 128, layout);
6843}
6844
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006845template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6846LayerTestResult<T, 4> L2Normalization3dTestCommon(
6847 armnn::IWorkloadFactory& workloadFactory,
6848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006849 float scale,
6850 int32_t offset,
6851 float outScale,
6852 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006853 const armnn::DataLayout layout)
6854{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006855 // Width: 3
6856 // Height: 4
6857 // Channels: 2
6858 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006859 unsigned int numberOfBatches = 1;
6860 unsigned int numberOfChannels = 2;
6861 unsigned int height = 4;
6862 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006863
Nina Drozdd41b2592018-11-19 13:03:36 +00006864 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006865 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006866 std::vector<float> inputValues
6867 {
6868 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006869 119.0f, 21.0f, 150.0f,
6870 149.0f, 32.0f, 179.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006871 15.0f, 227.0f, 141.0f,
telsoa014fcda012018-03-09 14:13:49 +00006872 147.0f, 199.0f, 220.0f,
6873
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006874 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006875 110.0f, 140.0f, 73.0f,
6876 211.0f, 212.0f, 89.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006877 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006878 162.0f, 12.0f, 161.0f
6879 };
6880 std::vector<float> expectedOutputValues
6881 {
6882 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006883 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006884 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006885 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6886 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006887 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006888 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006889 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006890 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6891 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6892 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6893 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6894 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6895
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006896 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006897 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6898 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006899 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006900 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6901 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006902 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6903 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006904 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6905 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6906 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006907 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006908 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6909 };
telsoa014fcda012018-03-09 14:13:49 +00006910
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006911 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
6912 inputValues, outScale, outOffset, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006913}
telsoa014fcda012018-03-09 14:13:49 +00006914
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006915LayerTestResult<float, 4> L2Normalization3dTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006916 armnn::IWorkloadFactory& workloadFactory,
6917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006918 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006919{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006920 return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
6921 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006922}
6923
6924LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
6925 armnn::IWorkloadFactory& workloadFactory,
6926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6927 const armnn::DataLayout layout)
6928{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006929 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006930 layout);
6931}
6932
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006933LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
6934 armnn::IWorkloadFactory& workloadFactory,
6935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6936 const armnn::DataLayout layout)
6937{
6938 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
6939 1.f/128, 128, layout);
6940}
6941
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006942template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
6943LayerTestResult<T, 4> L2Normalization4dTestCommon(
6944 armnn::IWorkloadFactory& workloadFactory,
6945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Ferran Balaguerc6138d82019-06-13 17:23:50 +01006946 float scale,
6947 int32_t offset,
6948 float outScale,
6949 int32_t outOffset,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006950 const armnn::DataLayout layout)
6951{
6952 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006953 // Height: 4
6954 // Channels: 3
6955 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006956 unsigned int numberOfBatches = 2;
6957 unsigned int numberOfChannels = 3;
6958 unsigned int height = 4;
6959 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006960
Nina Drozdd41b2592018-11-19 13:03:36 +00006961 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006962 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006963 std::vector<float> inputValues
6964 {
6965 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006966 235.0f, 46.0f, 178.0f,
6967 100.0f, 123.0f, 19.0f,
6968 172.0f, 74.0f, 250.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006969 6.0f, 195.0f, 80.0f,
telsoa014fcda012018-03-09 14:13:49 +00006970
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006971 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006972 113.0f, 95.0f, 202.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006973 77.0f, 114.0f, 71.0f,
telsoa014fcda012018-03-09 14:13:49 +00006974 122.0f, 246.0f, 166.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006975 82.0f, 28.0f, 37.0f,
telsoa014fcda012018-03-09 14:13:49 +00006976
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006977 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006978 56.0f, 170.0f, 162.0f,
telsoa014fcda012018-03-09 14:13:49 +00006979 194.0f, 89.0f, 254.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006980 12.0f, 209.0f, 200.0f,
6981 1.0f, 64.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00006982
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006983 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006984 67.0f, 90.0f, 49.0f,
6985 7.0f, 163.0f, 18.0f,
6986 25.0f, 117.0f, 103.0f,
telsoa014fcda012018-03-09 14:13:49 +00006987 247.0f, 59.0f, 189.0f,
6988
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006989 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006990 239.0f, 104.0f, 199.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006991 17.0f, 124.0f, 153.0f,
telsoa014fcda012018-03-09 14:13:49 +00006992 222.0f, 217.0f, 75.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006993 32.0f, 126.0f, 21.0f,
telsoa014fcda012018-03-09 14:13:49 +00006994
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006995 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006996 97.0f, 145.0f, 215.0f,
telsoa014fcda012018-03-09 14:13:49 +00006997 115.0f, 116.0f, 238.0f,
6998 226.0f, 16.0f, 132.0f,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01006999 92.0f, 125.0f, 88.0f
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007000 };
7001 std::vector<float> expectedOutputValues
7002 {
7003 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007004 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007005 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007006 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7007 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
7008 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007009 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007010 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007011 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007012 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007013 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007014 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007015 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007016
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007017 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007018 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007019 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007020 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007021 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007022 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007023 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007024 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
7025 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7026 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007027 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7028 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7029 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007030
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007031 // Batch 0, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007032 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007033 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
7034 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
7035 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007036 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007037 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007038 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007039 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
7040 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007041 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
7042 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
7043 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007044
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007045 // Batch 1, Channel 0, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007046 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7047 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7048 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7049 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007050 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007051 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7052 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007053 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
7054 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7055 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007056 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007057 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
7058
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007059 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00007060 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
7061 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7062 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007063 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007064 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7065 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7066 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
7067 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007068 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
7069 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007070 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007071 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007072
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007073 // Batch 1, Channel 2, Height (4) x Width (3)
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007074 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007075 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
7076 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
7077 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
7078 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
7079 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
7080 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007081 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007082 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007083 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
telsoa014fcda012018-03-09 14:13:49 +00007084 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007085 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01007086 };
telsoa014fcda012018-03-09 14:13:49 +00007087
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007088 return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
7089 inputValues, outScale, outOffset, expectedOutputValues, layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007090}
7091
7092LayerTestResult<float, 4> L2Normalization4dTest(
7093 armnn::IWorkloadFactory& workloadFactory,
7094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7095 const armnn::DataLayout layout)
7096{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007097 return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
7098 layout);
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007099}
7100
7101LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
7102 armnn::IWorkloadFactory& workloadFactory,
7103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7104 const armnn::DataLayout layout)
7105{
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007106 return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
Ferran Balaguerd73d14f2019-06-10 10:29:54 +01007107 layout);
telsoa014fcda012018-03-09 14:13:49 +00007108}
7109
Ferran Balaguerc6138d82019-06-13 17:23:50 +01007110LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
7111 armnn::IWorkloadFactory& workloadFactory,
7112 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7113 const armnn::DataLayout layout)
7114{
7115 return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
7116 1.f/128, 128, layout);
7117}
7118
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007119template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007120LayerTestResult<T, 4> ConstantTestImpl(
7121 armnn::IWorkloadFactory& workloadFactory,
7122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00007123 float qScale,
7124 int32_t qOffset)
7125{
7126 constexpr unsigned int inputWidth = 3;
7127 constexpr unsigned int inputHeight = 4;
7128 constexpr unsigned int inputChannels = 3;
7129 constexpr unsigned int inputBatchSize = 2;
7130
7131 constexpr unsigned int outputWidth = inputWidth;
7132 constexpr unsigned int outputHeight = inputHeight;
7133 constexpr unsigned int outputChannels = inputChannels;
7134 constexpr unsigned int outputBatchSize = inputBatchSize;
7135
Nina Drozd58ef2c62019-05-16 12:09:18 +01007136 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7137 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007138
Nina Drozd58ef2c62019-05-16 12:09:18 +01007139 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7140 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00007141
7142 // Set quantization parameters if the requested type is a quantized type.
7143 if(armnn::IsQuantizedType<T>())
7144 {
7145 inputTensorInfo.SetQuantizationScale(qScale);
7146 inputTensorInfo.SetQuantizationOffset(qOffset);
7147 outputTensorInfo.SetQuantizationScale(qScale);
7148 outputTensorInfo.SetQuantizationOffset(qOffset);
7149 }
7150
7151 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
7152 QuantizedVector<T>(qScale, qOffset, {
7153 // Batch 0, Channel 0
7154 235.0f, 46.0f, 178.0f,
7155 100.0f, 123.0f, 19.0f,
7156 172.0f, 74.0f, 250.0f,
7157 6.0f, 195.0f, 80.0f,
7158
7159 // Batch 0, Channel 1
7160 113.0f, 95.0f, 202.0f,
7161 77.0f, 114.0f, 71.0f,
7162 122.0f, 246.0f, 166.0f,
7163 82.0f, 28.0f, 37.0f,
7164
7165 // Batch 0, Channel 2
7166 56.0f, 170.0f, 162.0f,
7167 194.0f, 89.0f, 254.0f,
7168 12.0f, 209.0f, 200.0f,
7169 1.0f, 64.0f, 54.0f,
7170
7171 // Batch 1, Channel 0
7172 67.0f, 90.0f, 49.0f,
7173 7.0f, 163.0f, 18.0f,
7174 25.0f, 117.0f, 103.0f,
7175 247.0f, 59.0f, 189.0f,
7176
7177 // Batch 1, Channel 1
7178 239.0f, 104.0f, 199.0f,
7179 17.0f, 124.0f, 153.0f,
7180 222.0f, 217.0f, 75.0f,
7181 32.0f, 126.0f, 21.0f,
7182
7183 // Batch 1, Channel 2
7184 97.0f, 145.0f, 215.0f,
7185 115.0f, 116.0f, 238.0f,
7186 226.0f, 16.0f, 132.0f,
7187 92.0f, 125.0f, 88.0f,
7188 })));
7189
7190 LayerTestResult<T, 4> result(outputTensorInfo);
7191 result.outputExpected = input;
7192
7193 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7194
7195 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
7196 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
7197
7198 armnn::ConstantQueueDescriptor descriptor;
7199 descriptor.m_LayerOutput = &constantTensor;
7200
7201 armnn::WorkloadInfo info;
7202 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7203
7204 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
7205
7206 outputHandle->Allocate();
7207
Derek Lambertif30f7d32019-04-09 10:25:02 +01007208 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007209 workload->Execute();
7210
7211 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7212 return result;
7213}
7214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007215LayerTestResult<float, 4> ConstantTest(
7216 armnn::IWorkloadFactory& workloadFactory,
7217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007218{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007219 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007220}
7221
Nina Drozd58ef2c62019-05-16 12:09:18 +01007222LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
7223 armnn::IWorkloadFactory& workloadFactory,
7224 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7225{
7226 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
7227}
7228
7229LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007230 armnn::IWorkloadFactory& workloadFactory,
7231 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007232{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00007233 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00007234}
7235
Jim Flynn4ed6c832019-05-20 11:02:46 +01007236LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Ferran Balaguerb2845652019-02-27 09:42:06 +00007237 armnn::IWorkloadFactory& workloadFactory,
7238 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7239{
7240 unsigned int outputWidth = 3;
7241 unsigned int outputHeight = 6;
7242 unsigned int outputChannels = 3;
7243
7244 unsigned int inputWidth1 = 3;
7245 unsigned int inputHeight1 = 6;
7246 unsigned int inputChannels1 = 2;
7247
7248 unsigned int inputWidth2 = 3;
7249 unsigned int inputHeight2 = 6;
7250 unsigned int inputChannels2 = 1;
7251
7252 // Defines the tensor descriptors.
7253 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7254 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7255 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
7256
7257 // Quantized input1 tensor. Range [-3, 1]
7258 const float inputScale1 = 0.015686f;
7259 const int32_t inputOffset1 = 192;
7260
7261 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7262 {
7263 1, 2, 3,
7264 4, 5, 6,
7265 7, 8, 9,
7266 10, 11, 12,
7267 13, 14, 15,
7268 16, 17, 18,
7269
7270 19, 20, 21,
7271 22, 23, 24,
7272 25, 26, 27,
7273 28, 29, 30,
7274 31, 32, 33,
7275 34, 35, 36,
7276 })
7277 );
7278
7279 // Quatized input2 tensor. Range [-1, 4]
7280 const float inputScale2 = 0.019608f;
7281 const int32_t inputOffset2 = 50;
7282
7283 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7284 {
7285 37, 38, 39,
7286 40, 41, 42,
7287 43, 44, 45,
7288 46, 47, 48,
7289 49, 50, 51,
7290 52, 53, 54,
7291 })
7292 );
7293
7294 // Output has the same quantization parameters than input1,
7295 // so that only the requantization of input2 is required
7296 const float outputScale = 0.015686f;
7297 const int32_t outputOffset = 192;
7298
7299 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7300
7301 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
7302 {
7303 1, 2, 3,
7304 4, 5, 6,
7305 7, 8, 9,
7306 10, 11, 12,
7307 13, 14, 15,
7308 16, 17, 18,
7309
7310 19, 20, 21,
7311 22, 23, 24,
7312 25, 26, 27,
7313 28, 29, 30,
7314 31, 32, 33,
7315 34, 35, 36,
7316
7317 176, 177, 178,
7318 179, 181, 182,
7319 183, 184, 186,
7320 187, 188, 189,
7321 191, 192, 193,
7322 195, 196, 197,
7323 })
7324 );
7325
7326 outputTensorInfo.SetQuantizationScale(outputScale);
7327 outputTensorInfo.SetQuantizationOffset(outputOffset);
7328 inputTensorInfo1.SetQuantizationScale(inputScale1);
7329 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
7330 inputTensorInfo2.SetQuantizationScale(inputScale2);
7331 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
7332
7333 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007334 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007335
7336 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007337 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007338
7339 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7340
7341 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7342
7343 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7344 subTensorsSupported ?
7345 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7346 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7347
7348 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7349 subTensorsSupported ?
7350 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7351 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7352
Jim Flynne242f2d2019-05-22 14:24:13 +01007353 armnn::ConcatQueueDescriptor data;
Ferran Balaguerb2845652019-02-27 09:42:06 +00007354 armnn::WorkloadInfo info;
7355 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7356 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
7357 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7358
7359 data.m_ViewOrigins.push_back(window1);
7360 data.m_ViewOrigins.push_back(window2);
7361
Jim Flynn4ed6c832019-05-20 11:02:46 +01007362 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
Ferran Balaguerb2845652019-02-27 09:42:06 +00007363
7364 inputHandle1->Allocate();
7365 inputHandle2->Allocate();
7366 outputHandle->Allocate();
7367
7368 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7369 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
7370
Derek Lambertif30f7d32019-04-09 10:25:02 +01007371 workload->PostAllocationConfigure();
Ferran Balaguerb2845652019-02-27 09:42:06 +00007372 workload->Execute();
7373
7374 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7375
7376 return ret;
7377}
7378
Jim Flynn4ed6c832019-05-20 11:02:46 +01007379LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007380 armnn::IWorkloadFactory& workloadFactory,
7381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007382{
surmeh013537c2c2018-05-18 16:31:43 +01007383 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00007384 unsigned int outputHeight = 6;
7385 unsigned int outputChannels = 3;
7386
surmeh013537c2c2018-05-18 16:31:43 +01007387 unsigned int inputWidth1 = 3;
7388 unsigned int inputHeight1 = 6;
7389 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00007390
surmeh013537c2c2018-05-18 16:31:43 +01007391 unsigned int inputWidth2 = 3;
7392 unsigned int inputHeight2 = 6;
7393 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00007394
telsoa01c577f2c2018-08-31 09:22:23 +01007395 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00007396 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
7397 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
7398 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00007399
Jim Flynn4ed6c832019-05-20 11:02:46 +01007400 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00007401 const float scale = 0.13497836f;
7402 const int32_t offset = -7;
7403
7404 outputTensorInfo.SetQuantizationScale(scale);
7405 outputTensorInfo.SetQuantizationOffset(offset);
7406 inputTensorInfo1.SetQuantizationScale(scale);
7407 inputTensorInfo1.SetQuantizationOffset(offset);
7408 inputTensorInfo2.SetQuantizationScale(scale);
7409 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00007410
7411 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
7412
7413 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01007414 {
7415 1, 2, 3,
7416 4, 5, 6,
7417 7, 8, 9,
7418 10, 11, 12,
7419 13, 14, 15,
7420 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007421
surmeh013537c2c2018-05-18 16:31:43 +01007422 19, 20, 21,
7423 22, 23, 24,
7424 25, 26, 27,
7425 28, 29, 30,
7426 31, 32, 33,
7427 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007428
surmeh013537c2c2018-05-18 16:31:43 +01007429 37, 38, 39,
7430 40, 41, 42,
7431 43, 44, 45,
7432 46, 47, 48,
7433 49, 50, 51,
7434 52, 53, 54,
7435 })
telsoa014fcda012018-03-09 14:13:49 +00007436 );
7437
telsoa014fcda012018-03-09 14:13:49 +00007438 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
7439 {
surmeh013537c2c2018-05-18 16:31:43 +01007440 1, 2, 3,
7441 4, 5, 6,
7442 7, 8, 9,
7443 10, 11, 12,
7444 13, 14, 15,
7445 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00007446
surmeh013537c2c2018-05-18 16:31:43 +01007447 19, 20, 21,
7448 22, 23, 24,
7449 25, 26, 27,
7450 28, 29, 30,
7451 31, 32, 33,
7452 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00007453 })
7454 );
7455
7456 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
7457 {
surmeh013537c2c2018-05-18 16:31:43 +01007458 37, 38, 39,
7459 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00007460 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01007461 46, 47, 48,
7462 49, 50, 51,
7463 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00007464 })
7465 );
7466
telsoa01c577f2c2018-08-31 09:22:23 +01007467 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01007468 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00007469
telsoa01c577f2c2018-08-31 09:22:23 +01007470 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01007471 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00007472
telsoa014fcda012018-03-09 14:13:49 +00007473
7474 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7475
7476 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
7477
7478 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
7479 subTensorsSupported ?
7480 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
7481 workloadFactory.CreateTensorHandle(inputTensorInfo1);
7482
7483 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
7484 subTensorsSupported ?
7485 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
7486 workloadFactory.CreateTensorHandle(inputTensorInfo2);
7487
telsoa014fcda012018-03-09 14:13:49 +00007488
Jim Flynne242f2d2019-05-22 14:24:13 +01007489 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00007490 armnn::WorkloadInfo info;
7491 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
7492 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00007493 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7494
7495 data.m_ViewOrigins.push_back(window1);
7496 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00007497
Jim Flynn4ed6c832019-05-20 11:02:46 +01007498 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00007499
7500 inputHandle1->Allocate();
7501 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00007502 outputHandle->Allocate();
7503
7504 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
7505 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00007506
Derek Lambertif30f7d32019-04-09 10:25:02 +01007507 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007508 workload->Execute();
7509
7510 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
7511
7512 return ret;
7513}
7514
// Concatenates a [2,6,3] and a [1,6,3] QuantisedSymm16 tensor along the
// channel dimension (axis 0) into a [3,6,3] output. When the backend supports
// sub-tensors, the inputs are written directly into views of the output;
// otherwise standalone input tensors are used and the Concat workload copies.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    // All three tensors share the same quantization parameters, so Concat can
    // copy the raw quantized values straight through.
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: channels 0-1 come from input1, channel 2 from input2.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Prefer sub-tensor views into the output; fall back to standalone handles.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Inputs must be allocated and filled before execution.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00007647
namespace
{
// Driver for quantized Addition layer tests.
//
// Builds two 4D input tensors and an expected-output tensor from the given
// shapes, values and per-tensor quantization parameters, runs an Addition
// workload created by the supplied factory, and returns the actual and
// expected outputs for the caller to compare.
//
// The armnn data type is derived from T: uint8_t selects QuantisedAsymm8,
// any other T selects QuantisedSymm16.
//
// Note: memoryManager is not referenced by this helper.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the element type to the corresponding quantized armnn data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; configure after allocation, then run.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7717
7718LayerTestResult<uint8_t, 4> AdditionUint8Test(
7719 armnn::IWorkloadFactory& workloadFactory,
7720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7721{
7722 const unsigned int shape0[] = { 1, 2, 2, 3 };
7723 const unsigned int shape1[] = { 1, 2, 2, 3 };
7724
7725 std::vector<uint8_t> input0(
7726 {
7727 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7728 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7729 });
7730
7731 std::vector<uint8_t> input1(
7732 {
7733 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7734 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7735 });
7736
7737 std::vector<uint8_t> output(
7738 {
7739 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7740 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7741 });
7742
7743 return AdditionQuantizeTestHelper(workloadFactory,
7744 memoryManager,
7745 shape0, input0, 7.0f, 3,
7746 shape1, input1, 7.0f, 3,
7747 shape0, output, 7.0f, 3);
7748}
7749
// Element-wise QSymm16 addition of two equal-shaped tensors, scale 7.0 and
// offset 0 throughout (symmetric quantization requires a zero offset).
// Unlike the uint8 variant above, none of the sums saturate the int16 range.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    // Dequantized values (quantized value * 7) shown to the right.
    std::vector<int16_t> input0(
        {
            63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
            203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
        });

    std::vector<int16_t> input1(
        {
            21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
            126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
        });

    std::vector<int16_t> output(
        {
            84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
            329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
        });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7781
namespace
{
// Driver for quantized/float Multiplication layer tests.
//
// Builds two 4D input tensors and an expected-output tensor with the given
// shapes, values and per-tensor quantization parameters, runs a
// Multiplication workload created by the supplied factory, and returns the
// actual and expected outputs for comparison by the caller.
//
// ArmnnType selects the tensor data type; T is resolved from it.
// Note: memoryManager is not referenced by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate before copying data in; configure after allocation, then run.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7847
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007848LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7849 armnn::IWorkloadFactory& workloadFactory,
7850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007851{
7852 unsigned int batchSize = 1;
7853 unsigned int channels = 2;
7854 unsigned int height = 2;
7855 unsigned int width = 3;
7856 const unsigned int shape[] = { batchSize, channels, height, width };
7857
telsoa01c577f2c2018-08-31 09:22:23 +01007858 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007859 std::vector<uint8_t> input0({
7860 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7861 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7862 });
7863
telsoa01c577f2c2018-08-31 09:22:23 +01007864 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007865 std::vector<uint8_t> input1({
7866 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7867 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7868 });
7869
telsoa01c577f2c2018-08-31 09:22:23 +01007870 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007871 std::vector<uint8_t> output(
7872 {
7873 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7874 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7875 });
7876
Sadik Armagan2999a022019-04-09 14:20:12 +01007877 // Scale/offset chosen to have output values out of range.
7878 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7879 memoryManager,
7880 shape,
7881 input0,
7882 4.0f,
7883 1,
7884 shape,
7885 input1,
7886 3.0f,
7887 -2,
7888 shape,
7889 output,
7890 1366.255f,
7891 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007892}
7893
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007894LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7895 armnn::IWorkloadFactory& workloadFactory,
7896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007897{
7898 const unsigned int shape0[] = { 1, 2, 2, 3 };
7899 const unsigned int shape1[] = { 1, 1, 1, 1 };
7900
7901 std::vector<uint8_t> input0({
7902 1, 2, 3, 4, 5, 6,
7903 7, 8, 9, 10, 11, 12
7904 });
7905
7906 std::vector<uint8_t> input1({2});
7907
7908 std::vector<uint8_t> output({
7909 2, 4, 6, 8, 10, 12,
7910 14, 16, 18, 20, 22, 24
7911 });
7912
Sadik Armagan2999a022019-04-09 14:20:12 +01007913 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7914 memoryManager,
7915 shape0,
7916 input0,
7917 1.0f,
7918 0,
7919 shape1,
7920 input1,
7921 1.0f,
7922 0,
7923 shape0,
7924 output,
7925 1.0f,
7926 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007927}
7928
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007929LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7930 armnn::IWorkloadFactory& workloadFactory,
7931 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007932{
7933 const unsigned int shape0[] = { 1, 2, 2, 3 };
7934 const unsigned int shape1[] = { 1, 1, 1, 3 };
7935
7936 std::vector<uint8_t> input0({
7937 1, 2, 3, 4, 5, 6,
7938 7, 8, 9, 10, 11, 12
7939 });
7940
7941 std::vector<uint8_t> input1({1, 2, 3});
7942
7943 std::vector<uint8_t> output({
7944 1, 4, 9, 4, 10, 18,
7945 7, 16, 27, 10, 22, 36
7946 });
7947
Sadik Armagan2999a022019-04-09 14:20:12 +01007948 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7949 memoryManager,
7950 shape0,
7951 input0,
7952 1.0f,
7953 0,
7954 shape1,
7955 input1,
7956 1.0f,
7957 0,
7958 shape0,
7959 output,
7960 1.0f,
7961 0);
7962}
7963
7964LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7965 armnn::IWorkloadFactory& workloadFactory,
7966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7967{
7968 const unsigned int shape[] = { 1, 2, 2, 3 };
7969
7970 std::vector<int16_t> input0(
7971 {
7972 6, 7, 8, 9, 10, 11,
7973 12, 13, 14, 15, 16, 17
7974 });
7975
7976 std::vector<int16_t> input1(
7977 {
7978 1, 2, 3, 4, 5, 6,
7979 7, 8, 9, 10, 11, 12
7980 });
7981
7982 std::vector<int16_t> output(
7983 {
7984 6, 14, 24, 36, 50, 66,
7985 84, 104, 126, 150, 176, 204
7986 });
7987
7988 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7989 memoryManager,
7990 shape,
7991 input0,
7992 1.0f,
7993 0,
7994 shape,
7995 input1,
7996 1.0f,
7997 0,
7998 shape,
7999 output,
8000 1.0f,
8001 0);
8002}
8003
8004LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
8005 armnn::IWorkloadFactory& workloadFactory,
8006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8007{
8008 const unsigned int shape0[] = { 1, 2, 2, 3 };
8009 const unsigned int shape1[] = { 1, 1, 1, 1 };
8010
8011 std::vector<int16_t> input0(
8012 {
8013 1, 2, 3, 4, 5, 6,
8014 7, 8, 9, 10, 11, 12
8015 });
8016
8017 std::vector<int16_t> input1({2});
8018
8019 std::vector<int16_t> output(
8020 {
8021 2, 4, 6, 8, 10, 12,
8022 14, 16, 18, 20, 22, 24
8023 });
8024
8025 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8026 memoryManager,
8027 shape0,
8028 input0,
8029 1.0f,
8030 0,
8031 shape1,
8032 input1,
8033 1.0f,
8034 0,
8035 shape0,
8036 output,
8037 1.0f,
8038 0);
8039}
8040
8041LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
8042 armnn::IWorkloadFactory& workloadFactory,
8043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8044{
8045 const unsigned int shape0[] = { 1, 2, 2, 3 };
8046 const unsigned int shape1[] = { 1, 1, 1, 3 };
8047
8048 std::vector<int16_t> input0(
8049 {
8050 1, 2, 3, 4, 5, 6,
8051 7, 8, 9, 10, 11, 12
8052 });
8053
8054 std::vector<int16_t> input1({1, 2, 3});
8055
8056 std::vector<int16_t> output(
8057 {
8058 1, 4, 9, 4, 10, 18,
8059 7, 16, 27, 10, 22, 36
8060 });
8061
8062 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8063 memoryManager,
8064 shape0,
8065 input0,
8066 1.0f,
8067 0,
8068 shape1,
8069 input1,
8070 1.0f,
8071 0,
8072 shape0,
8073 output,
8074 1.0f,
8075 0);
surmeh01bceff2f2018-03-29 16:29:27 +01008076}
telsoa014fcda012018-03-09 14:13:49 +00008077
namespace
{
// Driver for Subtraction layer tests (float and quantized).
//
// Builds two 4D input tensors and an expected-output tensor with the given
// shapes, values and per-tensor quantization parameters, runs a Subtraction
// workload created by the supplied factory, and returns the actual and
// expected outputs for comparison by the caller.
//
// ArmnnType selects the tensor data type; T is resolved from it.
// Note: memoryManager is not referenced by this helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate before copying data in; configure after allocation, then run.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
8143
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008144LayerTestResult<uint8_t, 4> SubtractionUint8Test(
8145 armnn::IWorkloadFactory& workloadFactory,
8146 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008147{
8148 const unsigned int shape0[] = { 1, 1, 2, 2 };
8149 const unsigned int shape1[] = { 1, 1, 2, 2 };
8150
8151 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8152 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
8153 std::vector<uint8_t> output({ 3, 3, 5, 5 });
8154
Sadik Armagan2999a022019-04-09 14:20:12 +01008155 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8156 memoryManager,
8157 shape0, input0, 0.5f, 2,
8158 shape1, input1, 1.0f, 0,
8159 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008160}
8161
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008162LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
8163 armnn::IWorkloadFactory& workloadFactory,
8164 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008165{
8166 const unsigned int shape0[] = { 1, 1, 2, 2 };
8167 const unsigned int shape1[] = { 1, 1, 1, 1 };
8168
8169 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
8170 std::vector<uint8_t> input1({ 2 });
8171 std::vector<uint8_t> output({ 5, 6, 7, 8 });
8172
Sadik Armagan2999a022019-04-09 14:20:12 +01008173 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
8174 memoryManager,
8175 shape0, input0, 0.5f, 2,
8176 shape1, input1, 1.0f, 0,
8177 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01008178}
8179
// QAsymm8 subtraction with a broadcast subtrahend; all scales 1.0, offsets 0.
LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    // NOTE(review): the expected output corresponds to broadcasting {2, 1}
    // along the height dimension (i.e. shape1 == {1, 1, 1, 2}, as in the
    // float SubtractionBroadcastTest above); numpy-style broadcasting of the
    // declared {1, 1, 2, 1} shape would instead yield {8, 10, 13, 15}.
    // TODO: confirm the intended subtrahend shape against the reference
    // backend.
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 1.0f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8197
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008198LayerTestResult<float, 4> SubtractionTest(
8199 armnn::IWorkloadFactory& workloadFactory,
8200 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008201{
8202 const unsigned int shape0[] = { 1, 1, 2, 2 };
8203 const unsigned int shape1[] = { 1, 1, 2, 2 };
8204
8205 std::vector<float> input0({ 1, 2, 3, 4 });
8206 std::vector<float> input1({ 1, -1, 0, 2 });
8207 std::vector<float> output({ 0, 3, 3, 2 });
8208
Sadik Armagan2999a022019-04-09 14:20:12 +01008209 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8210 memoryManager,
8211 shape0, input0, 1.0f, 0,
8212 shape1, input1, 1.0f, 0,
8213 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008214}
8215
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008216LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
8217 armnn::IWorkloadFactory& workloadFactory,
8218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008219{
8220 const unsigned int shape0[] = { 1, 1, 2, 2 };
8221 const unsigned int shape1[] = { 1, 1, 1, 1 };
8222
8223 std::vector<float> input0({ 1, 2, 3, 4 });
8224 std::vector<float> input1({ 10 });
8225 std::vector<float> output({ -9, -8, -7, -6 });
8226
Sadik Armagan2999a022019-04-09 14:20:12 +01008227 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8228 memoryManager,
8229 shape0, input0, 1.0f, 0,
8230 shape1, input1, 1.0f, 0,
8231 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008232}
8233
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008234LayerTestResult<float, 4> SubtractionBroadcastTest(
8235 armnn::IWorkloadFactory& workloadFactory,
8236 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01008237{
8238 const unsigned int shape0[] = { 1, 1, 2, 2 };
8239 const unsigned int shape1[] = { 1, 1, 1, 2 };
8240
8241 std::vector<float> input0({ 1, 2, 3, 4 });
8242 std::vector<float> input1({ 10, -5 });
8243 std::vector<float> output({ -9, 7, -7, 9 });
8244
Sadik Armagan2999a022019-04-09 14:20:12 +01008245 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
8246 memoryManager,
8247 shape0, input0, 1.0f, 0,
8248 shape1, input1, 1.0f, 0,
8249 shape0, output, 1.0f, 0);
8250}
8251
8252LayerTestResult<int16_t, 4> SubtractionInt16Test(
8253 armnn::IWorkloadFactory& workloadFactory,
8254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8255{
8256 const unsigned int shape0[] = { 1, 1, 2, 2 };
8257 const unsigned int shape1[] = { 1, 1, 2, 2 };
8258
8259 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8260 std::vector<int16_t> input1({ 1, 2, 1, 2 });
8261 std::vector<int16_t> output({ 3, 3, 5, 5 });
8262
8263 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8264 memoryManager,
8265 shape0, input0, 0.5f, 0,
8266 shape1, input1, 1.0f, 0,
8267 shape0, output, 1.0f, 0);
8268}
8269
// Subtracts a broadcast single-element QSymm16 tensor from a 1x1x2x2 tensor.
// input0 scale is 0.5 (dequantised { 5, 6, 7, 8 }); input1/output scale is 1.0.
LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 1, 1 }; // single element, broadcast over shape0

    std::vector<int16_t> input0({ 10, 12, 14, 16 }); // dequantised: 5, 6, 7, 8
    std::vector<int16_t> input1({ 2 });
    std::vector<int16_t> output({ 3, 4, 5, 6 });     // { 5-2, 6-2, 7-2, 8-2 }

    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
                                                                   memoryManager,
                                                                   shape0, input0, 0.5f, 0,
                                                                   shape1, input1, 1.0f, 0,
                                                                   shape0, output, 1.0f, 0);
}
8287
8288LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
8289 armnn::IWorkloadFactory& workloadFactory,
8290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8291{
8292 const unsigned int shape0[] = { 1, 1, 2, 2 };
8293 const unsigned int shape1[] = { 1, 1, 2, 1 };
8294
8295 std::vector<int16_t> input0({ 10, 12, 14, 16 });
8296 std::vector<int16_t> input1({ 2, 1 });
8297 std::vector<int16_t> output({ 8, 11, 12, 15 });
8298
8299 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
8300 memoryManager,
8301 shape0, input0, 1.0f, 0,
8302 shape1, input1, 1.0f, 0,
8303 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01008304}
8305
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008306LayerTestResult<float, 4> BatchNormTest(
8307 armnn::IWorkloadFactory& workloadFactory,
8308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008309{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008310 // BatchSize: 1
8311 // Channels: 2
8312 // Height: 3
8313 // Width: 2
8314
8315 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8316 std::vector<float> inputValues
8317 {
8318 // Batch 0, Channel 0, Height (3) x Width (2)
8319 1.f, 4.f,
8320 4.f, 2.f,
8321 1.f, 6.f,
8322
8323 // Batch 0, Channel 1, Height (3) x Width (2)
8324 1.f, 1.f,
8325 4.f, 1.f,
8326 -2.f, 4.f
8327 };
8328 std::vector<float> expectedOutputValues
8329 {
8330 // Batch 0, Channel 0, Height (3) x Width (2)
8331 1.f, 4.f,
8332 4.f, 2.f,
8333 1.f, 6.f,
8334
8335 // Batch 0, Channel 1, Height (3) x Width (2)
8336 3.f, 3.f,
8337 4.f, 3.f,
8338 2.f, 4.f
8339 };
8340
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008341 return BatchNormTestImpl<armnn::DataType::Float32>(
8342 workloadFactory, memoryManager,
8343 inputOutputShape, inputValues, expectedOutputValues,
8344 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008345}
8346
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008347LayerTestResult<float, 4> BatchNormNhwcTest(
8348 armnn::IWorkloadFactory& workloadFactory,
8349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008350{
8351 // BatchSize: 1
8352 // Height: 3
8353 // Width: 2
8354 // Channels: 2
8355
8356 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8357 std::vector<float> inputValues
8358 {
8359 // Batch 0, Height 0, Width (2) x Channel (2)
8360 1.f, 1.f,
8361 4.f, 1.f,
8362
8363 // Batch 0, Height 1, Width (2) x Channel (2)
8364 4.f, 4.f,
8365 2.f, 1.f,
8366
8367 // Batch 0, Height 2, Width (2) x Channel (2)
8368 1.f, -2.f,
8369 6.f, 4.f
8370 };
8371 std::vector<float> expectedOutputValues
8372 {
8373 // Batch 0, Height 0, Width (2) x Channel (2)
8374 1.f, 3.f,
8375 4.f, 3.f,
8376
8377 // Batch 0, Height 1, Width (2) x Channel (2)
8378 4.f, 4.f,
8379 2.f, 3.f,
8380
8381 // Batch 0, Height 2, Width (2) x Channel (2)
8382 1.f, 2.f,
8383 6.f, 4.f
8384 };
8385
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008386 return BatchNormTestImpl<armnn::DataType::Float32>(
8387 workloadFactory, memoryManager,
8388 inputOutputShape, inputValues, expectedOutputValues,
8389 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008390}
8391
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008392LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8393 armnn::IWorkloadFactory& workloadFactory,
8394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008395{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008396 // BatchSize: 1
8397 // Channels: 2
8398 // Height: 3
8399 // Width: 2
8400
8401 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8402 std::vector<float> inputValues
8403 {
8404 // Batch 0, Channel 0, Height (3) x Width (2)
8405 1.f, 4.f,
8406 4.f, 2.f,
8407 1.f, 6.f,
8408
8409 // Batch 0, Channel 1, Height (3) x Width (2)
8410 1.f, 1.f,
8411 4.f, 1.f,
8412 -2.f, 4.f
8413 };
8414 std::vector<float> expectedOutputValues
8415 {
8416 // Batch 0, Channel 0, Height (3) x Width (2)
8417 1.f, 4.f,
8418 4.f, 2.f,
8419 1.f, 6.f,
8420
8421 // Batch 0, Channel 1, Height (3) x Width (2)
8422 3.f, 3.f,
8423 4.f, 3.f,
8424 2.f, 4.f
8425 };
8426
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008427 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8428 workloadFactory, memoryManager,
8429 inputOutputShape, inputValues, expectedOutputValues,
8430 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008431}
8432
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008433LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8434 armnn::IWorkloadFactory& workloadFactory,
8435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008436{
8437 // BatchSize: 1
8438 // Height: 3
8439 // Width: 2
8440 // Channels: 2
8441
8442 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8443 std::vector<float> inputValues
8444 {
8445 // Batch 0, Height 0, Width (2) x Channel (2)
8446 1.f, 1.f,
8447 4.f, 1.f,
8448
8449 // Batch 0, Height 1, Width (2) x Channel (2)
8450 4.f, 4.f,
8451 2.f, 1.f,
8452
8453 // Batch 0, Height 2, Width (2) x Channel (2)
8454 1.f, -2.f,
8455 6.f, 4.f
8456 };
8457 std::vector<float> expectedOutputValues
8458 {
8459 // Batch 0, Height 0, Width (2) x Channel (2)
8460 1.f, 3.f,
8461 4.f, 3.f,
8462
8463 // Batch 0, Height 1, Width (2) x Channel (2)
8464 4.f, 4.f,
8465 2.f, 3.f,
8466
8467 // Batch 0, Height 2, Width (2) x Channel (2)
8468 1.f, 2.f,
8469 6.f, 4.f
8470 };
8471
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008472 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8473 (workloadFactory, memoryManager,
8474 inputOutputShape, inputValues, expectedOutputValues,
8475 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008476}
8477
Matteo Martincighf5507132019-06-04 10:59:47 +01008478LayerTestResult<int16_t, 4> BatchNormInt16Test(
8479 armnn::IWorkloadFactory& workloadFactory,
8480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8481{
8482 // BatchSize: 1
8483 // Channels: 2
8484 // Height: 3
8485 // Width: 2
8486
8487 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8488 std::vector<float> inputValues
8489 {
8490 // Batch 0, Channel 0, Height (3) x Width (2)
8491 1.f, 4.f,
8492 4.f, 2.f,
8493 1.f, 6.f,
8494
8495 // Batch 0, Channel 1, Height (3) x Width (2)
8496 1.f, 1.f,
8497 4.f, 1.f,
8498 -2.f, 4.f
8499 };
8500 std::vector<float> expectedOutputValues
8501 {
8502 // Batch 0, Channel 0, Height (3) x Width (2)
8503 1.f, 4.f,
8504 4.f, 2.f,
8505 1.f, 6.f,
8506
8507 // Batch 0, Channel 1, Height (3) x Width (2)
8508 3.f, 3.f,
8509 4.f, 3.f,
8510 2.f, 4.f
8511 };
8512
8513 return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
8514 workloadFactory, memoryManager,
8515 inputOutputShape, inputValues, expectedOutputValues,
8516 1.f/20.f, 50, armnn::DataLayout::NCHW);
8517}
8518
// QSymm16 variant of BatchNormNhwcTest (NHWC), quantisation scale 1/20,
// offset 50. Fixture data matches the Float32 NHWC test above.
LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>
        (workloadFactory, memoryManager,
         inputOutputShape, inputValues, expectedOutputValues,
         1.f/20.f, 50, armnn::DataLayout::NHWC);
}
8563
Nina Drozd58ef2c62019-05-16 12:09:18 +01008564LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008565 armnn::IWorkloadFactory& workloadFactory,
8566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008567{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008568 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008569}
8570
Nina Drozd58ef2c62019-05-16 12:09:18 +01008571LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8572 armnn::IWorkloadFactory& workloadFactory,
8573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8574{
8575 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8576}
8577
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008578LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8579 armnn::IWorkloadFactory& workloadFactory,
8580 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008581{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008582 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008583}
8584
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008585LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8586 armnn::IWorkloadFactory& workloadFactory,
8587 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008588{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008589 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008590}
8591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008592LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8593 armnn::IWorkloadFactory& workloadFactory,
8594 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008595{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008596 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008597}
8598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008599LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8600 armnn::IWorkloadFactory& workloadFactory,
8601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008602{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008603 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8604 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008605}
8606
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008607LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8608 armnn::IWorkloadFactory& workloadFactory,
8609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008610{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008611 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8612 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008613}
8614
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008615LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8616 armnn::IWorkloadFactory& workloadFactory,
8617 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008618{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008619 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008620}
8621
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008622LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8623 armnn::IWorkloadFactory& workloadFactory,
8624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008625{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008626 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008627}
8628
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008629LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8630 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8632 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008633{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008634 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8635 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008636}
8637
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008638LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8639 armnn::IWorkloadFactory& workloadFactory,
8640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008641{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008642 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008643}
8644
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008645LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8646 armnn::IWorkloadFactory& workloadFactory,
8647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008649 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8650 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008651}
8652
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008653LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8654 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8656 bool useSubtensor)
8657{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008658 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8659 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008660}
8661
8662LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8663 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008665{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008666 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008667}
8668
8669LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8670 armnn::IWorkloadFactory& workloadFactory,
8671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8672{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008673 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008674}
8675
8676LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8677 armnn::IWorkloadFactory& workloadFactory,
8678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8679{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008680 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008681}
8682
8683LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8684 armnn::IWorkloadFactory& workloadFactory,
8685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8686{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008687 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8688 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008689}
8690
8691LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8692 armnn::IWorkloadFactory& workloadFactory,
8693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8694{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008695 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8696 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008697}
8698
8699LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8700 armnn::IWorkloadFactory& workloadFactory,
8701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8702{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008703 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8704 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008705}
8706
8707LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8708 armnn::IWorkloadFactory& workloadFactory,
8709 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8710{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008711 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8712 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008713}
8714
8715LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8716 armnn::IWorkloadFactory& workloadFactory,
8717 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8718 bool useSubtensor)
8719{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008720 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8721 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008722}
8723
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008724LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8725 armnn::IWorkloadFactory& workloadFactory,
8726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8727 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008728{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008729 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8730 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008731}
8732
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008733LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8734 armnn::IWorkloadFactory& workloadFactory,
8735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8736 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008737{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008738 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008739 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008740}
8741
Teresa Charlin0434df62019-06-06 13:40:35 +01008742LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
8743 armnn::IWorkloadFactory& workloadFactory,
8744 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8745 bool forceNoPadding)
8746{
8747 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedSymm16>(
8748 workloadFactory, memoryManager, forceNoPadding);
8749}
8750
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008751LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8752 armnn::IWorkloadFactory& workloadFactory,
8753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8754 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008755{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008756 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8757 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008758}
8759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008760LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8761 armnn::IWorkloadFactory& workloadFactory,
8762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8763 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008764{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008765 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008766 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008767}
8768
Teresa Charlin0434df62019-06-06 13:40:35 +01008769LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
8770 armnn::IWorkloadFactory& workloadFactory,
8771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8772 bool forceNoPadding)
8773{
8774 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedSymm16>(
8775 workloadFactory, memoryManager, forceNoPadding);
8776}
8777
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008778LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8779 armnn::IWorkloadFactory& workloadFactory,
8780 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008781 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008782{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008783 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008784}
8785
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008786LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8787 armnn::IWorkloadFactory& workloadFactory,
8788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008789 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008790{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008791 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008792}
8793
Teresa Charlin0434df62019-06-06 13:40:35 +01008794LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
8795 armnn::IWorkloadFactory& workloadFactory,
8796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8797 const armnn::DataLayout dataLayout)
8798{
8799 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
8800}
8801LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8802 armnn::IWorkloadFactory& workloadFactory,
8803 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8804{
8805 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8806}
8807
8808LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8809 armnn::IWorkloadFactory& workloadFactory,
8810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8811{
8812 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8813 workloadFactory, memoryManager, 1.0f, -5);
8814}
8815
8816LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
8817 armnn::IWorkloadFactory& workloadFactory,
8818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8819{
8820 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8821 workloadFactory, memoryManager);
8822}
8823
8824LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8825 armnn::IWorkloadFactory& workloadFactory,
8826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8827{
8828 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
8829}
8830
8831LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8832 armnn::IWorkloadFactory& workloadFactory,
8833 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8834{
8835 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8836 workloadFactory, memoryManager, 1.0f, -5);
8837}
8838
8839LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
8840 armnn::IWorkloadFactory& workloadFactory,
8841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8842{
8843 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
8844 workloadFactory, memoryManager);
8845}
8846
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008847LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8848 armnn::IWorkloadFactory& workloadFactory,
8849 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008850 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008851{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008852 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008853}
8854
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008855LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8856 armnn::IWorkloadFactory& workloadFactory,
8857 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008858 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008859{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008860 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008861 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008862}
8863
Teresa Charlin0434df62019-06-06 13:40:35 +01008864LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
8865 armnn::IWorkloadFactory& workloadFactory,
8866 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8867 const armnn::DataLayout dataLayout)
8868{
8869 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8870 workloadFactory, memoryManager, dataLayout);
8871}
8872
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008873LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8874 armnn::IWorkloadFactory& workloadFactory,
8875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8876 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008877{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008878 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008879 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008880}
8881
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008882LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8883 armnn::IWorkloadFactory& workloadFactory,
8884 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008885{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008886 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008887}
8888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008889LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8890 armnn::IWorkloadFactory& workloadFactory,
8891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008892{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008893 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8894 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008895}
8896
Teresa Charlin0434df62019-06-06 13:40:35 +01008897LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
8898 armnn::IWorkloadFactory& workloadFactory,
8899 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8900{
8901 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
8902 workloadFactory, memoryManager);
8903}
// Average pooling tests with PaddingMethod::IgnoreValue semantics.
// Each wrapper only selects the armnn::DataType for the shared templated
// implementation; Float32 / QAsymm8 / QSymm16 variants exist for each case.

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}

// "NoPadding" variants: same pooling configuration but with zero padding, so
// the IgnoreValue padding method has no elements to exclude.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}

// 3x3 pool-size variants.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager);
}
8973
// L2 pooling tests. The Simple* wrappers additionally take a DataLayout
// (NCHW/NHWC); the Size/Stride wrappers cover a range of kernel sizes and
// strides. Each wrapper only selects the armnn::DataType for the shared
// templated implementation.

LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, dataLayout);
}

// 3x3 kernel, stride 1.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// 3x3 kernel, stride 3.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// 3x3 kernel, stride 4.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// 7x7 kernel.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// 9x9 kernel.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
// L2 pooling tests with PaddingMethod::IgnoreValue semantics; each wrapper
// only selects the armnn::DataType for the shared templated implementation.

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// 3x3 kernel variants.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9142
// Pooling with a non-square kernel and asymmetric padding; data-type dispatch
// wrappers over the shared templated implementation.

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9163
// Comparison tests: run the same pooling (poolingType) on the backend under
// test and on refWorkloadFactory, and compare the results.

LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    // Only the QAsymm8 variant supplies explicit quantisation parameters
    // (scale 0.1, offset 128); the other variants use the template defaults.
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QuantisedSymm16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
9193
// Large fully-connected layer test; transposeWeights selects whether the
// weight tensor is supplied pre-transposed.
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
9201
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009202LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
9203 armnn::IWorkloadFactory& workloadFactory,
9204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009205{
9206 // Create Initial Tensor
9207 // 1, 2, 3
9208 // 4, 5, 6
9209 // 7, 8, 9
9210
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009211 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
9212 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009213
9214 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
9215 {1, 2, 3,
9216 4, 5, 6,
9217 7, 8, 9
9218 });
9219
9220 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
9221 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
9222 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
9223 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
9224
9225 // Apply MaxPool poolSize = 1x1, stride=2x2
9226 // Result =
9227 // 1, 3
9228 // 7, 9
9229 armnn::Pooling2dDescriptor descriptor;
9230 descriptor.m_PoolHeight = 1;
9231 descriptor.m_PoolWidth = 1;
9232 descriptor.m_StrideX = 2;
9233 descriptor.m_StrideY = 2;
9234 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
9235
9236 armnn::Pooling2dQueueDescriptor queueDescriptor;
9237 queueDescriptor.m_Parameters = descriptor;
9238 armnn::WorkloadInfo workloadInfo;
9239 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
9240 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
9241
9242 // Create the MaxPool
9243 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
9244
9245 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
9246 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
9247 boost::multi_array<float, 4> resultMaxPool;
9248 resultMaxPool.resize(shape);
9249
9250
9251 // Create addition with another tensor the same size
9252 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
9253 // with the initial tensor.
9254 // 12, 16
9255 // 24, 28
9256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009257 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
9258 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009259
9260 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
9261 {12, 16,
9262 24, 28,
9263 });
9264
9265 // Expected output tensor after MaxPool and Addition.
9266 LayerTestResult<float,4> addRet(addOutputTensorInfo);
9267 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
9268 {
9269 13, 19,
9270 31, 37
9271 }));
9272
9273 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
9274 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
9275
9276 armnn::AdditionQueueDescriptor data;
9277 armnn::WorkloadInfo info;
9278
9279 // Add the output of the MaxPool and the new tensor
9280 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
9281 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
9282 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
9283
9284 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
9285
9286 poolingInputHandle->Allocate();
9287 poolingOutputHandle->Allocate();
9288 addInputHandle->Allocate();
9289 addOutputHandle->Allocate();
9290
9291 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
9292 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
9293
9294 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
9295 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
9296
Derek Lambertif30f7d32019-04-09 10:25:02 +01009297 workload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009298 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01009299 addWorkload->PostAllocationConfigure();
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009300 addWorkload->Execute();
9301
9302 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
9303
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01009304 return addRet;
9305}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009306
// SpaceToBatchNd tests: Float32 and QAsymm8 variants, in the default layout
// and in explicit NHWC. Each wrapper only selects the armnn::DataType for the
// shared templated implementation.

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// NHWC layout variants.
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009418
// SpaceToBatchNd QSymm16 variants.
// NOTE(review): these wrappers are named "*Uint16*" but dispatch on
// DataType::QuantisedSymm16 and return int16_t elements. The names are part of
// the public test interface declared elsewhere, so they are kept as-is;
// consider renaming (with the header) in a follow-up.

LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

// NHWC layout variants.
LayerTestResult<int16_t, 4> SpaceToBatchNdSimpleNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiChannelsNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdMultiBlockNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> SpaceToBatchNdPaddingNHWCUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9474
// SpaceToDepth tests. "Test1" wrappers use SpaceToDepthSimpleTest1 and
// "Test2" wrappers use SpaceToDepthSimpleTest2; the default layout is NHWC
// (no explicit layout argument) and the NCHW variants pass
// armnn::DataLayout::NCHW explicitly.

LayerTestResult<uint8_t, 4> SpaceToDepthNHWCAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToDepthNCHWAsymmQ8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest1<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> SpaceToDepthNHWCFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager);
}

LayerTestResult<float, 4> SpaceToDepthNCHWFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> SpaceToDepthNHWCQSymm16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager);
}
9540
9541LayerTestResult<int16_t, 4> SpaceToDepthNCHWQSymm16Test(
9542 armnn::IWorkloadFactory& workloadFactory,
9543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9544{
9545 return SpaceToDepthSimpleTest2<armnn::DataType::QuantisedSymm16>(
Keith Davisa57eccb2019-06-14 17:33:22 +01009546 workloadFactory,
9547 memoryManager,
9548 armnn::DataLayout::NCHW);
9549}
9550
// NOTE(review): empty anonymous namespace — appears to be a leftover after
// file-local helpers were removed; safe to delete in a follow-up cleanup.
namespace {

} // anonymous namespace
9554
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009555LayerTestResult<float, 4> StridedSlice4DFloat32Test(
9556 armnn::IWorkloadFactory& workloadFactory,
9557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9558{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009559 return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009560}
9561
9562LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
9563 armnn::IWorkloadFactory& workloadFactory,
9564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9565{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009566 return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009567}
9568
9569LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
9570 armnn::IWorkloadFactory& workloadFactory,
9571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9572{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009573 return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009574}
9575
9576LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
9577 armnn::IWorkloadFactory& workloadFactory,
9578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9579{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009580 return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009581}
9582
9583LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
9584 armnn::IWorkloadFactory& workloadFactory,
9585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9586{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009587 return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009588}
9589
9590LayerTestResult<float, 3> StridedSlice3DFloat32Test(
9591 armnn::IWorkloadFactory& workloadFactory,
9592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9593{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009594 return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009595}
9596
9597LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
9598 armnn::IWorkloadFactory& workloadFactory,
9599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9600{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009601 return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009602}
9603
9604LayerTestResult<float, 2> StridedSlice2DFloat32Test(
9605 armnn::IWorkloadFactory& workloadFactory,
9606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9607{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009608 return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009609}
9610
9611LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
9612 armnn::IWorkloadFactory& workloadFactory,
9613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9614{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009615 return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009616}
9617
9618LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
9619 armnn::IWorkloadFactory& workloadFactory,
9620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9621{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009622 return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009623}
9624
9625LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
9626 armnn::IWorkloadFactory& workloadFactory,
9627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9628{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009629 return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009630}
9631
9632LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
9633 armnn::IWorkloadFactory& workloadFactory,
9634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9635{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009636 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009637}
9638
9639LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
9640 armnn::IWorkloadFactory& workloadFactory,
9641 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9642{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009643 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009644}
9645
9646LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
9647 armnn::IWorkloadFactory& workloadFactory,
9648 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9649{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009650 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009651}
9652
9653LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
9654 armnn::IWorkloadFactory& workloadFactory,
9655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9656{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009657 return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009658}
9659
9660LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
9661 armnn::IWorkloadFactory& workloadFactory,
9662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9663{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009664 return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009665}
9666
9667LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
9668 armnn::IWorkloadFactory& workloadFactory,
9669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9670{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009671 return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009672}
9673
9674LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
9675 armnn::IWorkloadFactory& workloadFactory,
9676 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9677{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009678 return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009679}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009680
Matteo Martincigh42666a12019-05-29 08:53:41 +01009681LayerTestResult<int16_t, 4> StridedSlice4DInt16Test(
9682 armnn::IWorkloadFactory& workloadFactory,
9683 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9684{
9685 return StridedSlice4DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9686}
9687
9688LayerTestResult<int16_t, 4> StridedSlice4DReverseInt16Test(
9689 armnn::IWorkloadFactory& workloadFactory,
9690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9691{
9692 return StridedSlice4DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9693}
9694
9695LayerTestResult<int16_t, 4> StridedSliceSimpleStrideInt16Test(
9696 armnn::IWorkloadFactory& workloadFactory,
9697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9698{
9699 return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9700}
9701
9702LayerTestResult<int16_t, 4> StridedSliceSimpleRangeMaskInt16Test(
9703 armnn::IWorkloadFactory& workloadFactory,
9704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9705{
9706 return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9707}
9708
9709LayerTestResult<int16_t, 2> StridedSliceShrinkAxisMaskInt16Test(
9710 armnn::IWorkloadFactory& workloadFactory,
9711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9712{
9713 return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9714}
9715
9716LayerTestResult<int16_t, 3> StridedSlice3DInt16Test(
9717 armnn::IWorkloadFactory& workloadFactory,
9718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9719{
9720 return StridedSlice3DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9721}
9722
9723LayerTestResult<int16_t, 3> StridedSlice3DReverseInt16Test(
9724 armnn::IWorkloadFactory& workloadFactory,
9725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9726{
9727 return StridedSlice3DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9728}
9729
9730LayerTestResult<int16_t, 2> StridedSlice2DInt16Test(
9731 armnn::IWorkloadFactory& workloadFactory,
9732 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9733{
9734 return StridedSlice2DTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9735}
9736
9737LayerTestResult<int16_t, 2> StridedSlice2DReverseInt16Test(
9738 armnn::IWorkloadFactory& workloadFactory,
9739 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9740{
9741 return StridedSlice2DReverseTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9742}
9743
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009744LayerTestResult<float, 4> Debug4DFloat32Test(
9745 armnn::IWorkloadFactory& workloadFactory,
9746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9747{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009748 return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009749}
9750
9751LayerTestResult<float, 3> Debug3DFloat32Test(
9752 armnn::IWorkloadFactory& workloadFactory,
9753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9754{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009755 return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009756}
9757
9758LayerTestResult<float, 2> Debug2DFloat32Test(
9759 armnn::IWorkloadFactory& workloadFactory,
9760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9761{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009762 return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009763}
9764
9765LayerTestResult<float, 1> Debug1DFloat32Test(
9766 armnn::IWorkloadFactory& workloadFactory,
9767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009769 return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009770}
9771
9772LayerTestResult<uint8_t, 4> Debug4DUint8Test(
9773 armnn::IWorkloadFactory& workloadFactory,
9774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9775{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009776 return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009777}
9778
9779LayerTestResult<uint8_t, 3> Debug3DUint8Test(
9780 armnn::IWorkloadFactory& workloadFactory,
9781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9782{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009783 return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009784}
9785
9786LayerTestResult<uint8_t, 2> Debug2DUint8Test(
9787 armnn::IWorkloadFactory& workloadFactory,
9788 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9789{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009790 return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009791}
9792
9793LayerTestResult<uint8_t, 1> Debug1DUint8Test(
9794 armnn::IWorkloadFactory& workloadFactory,
9795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9796{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009797 return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009798}
Matteo Martincigh49124022019-01-11 13:25:59 +00009799
narpra014951d842019-01-18 16:53:53 +00009800LayerTestResult<float, 1> Gather1DParamsFloatTest(
9801 armnn::IWorkloadFactory& workloadFactory,
9802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9803{
9804 return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9805}
9806
9807LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
9808 armnn::IWorkloadFactory& workloadFactory,
9809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9810{
9811 return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9812}
9813
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009814LayerTestResult<int16_t, 1> Gather1DParamsInt16Test(
9815 armnn::IWorkloadFactory& workloadFactory,
9816 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9817{
9818 return Gather1DParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9819}
9820
narpra014951d842019-01-18 16:53:53 +00009821LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
9822 armnn::IWorkloadFactory& workloadFactory,
9823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9824{
9825 return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9826}
9827
9828LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
9829 armnn::IWorkloadFactory& workloadFactory,
9830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9831{
9832 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9833}
9834
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009835LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
9836 armnn::IWorkloadFactory& workloadFactory,
9837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9838{
9839 return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9840}
9841
narpra014951d842019-01-18 16:53:53 +00009842LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
9843 armnn::IWorkloadFactory& workloadFactory,
9844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9845{
9846 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
9847}
9848
9849LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
9850 armnn::IWorkloadFactory& workloadFactory,
9851 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9852{
9853 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
9854 workloadFactory, memoryManager);
Matteo Martincigh3d6898c2019-01-15 16:11:44 +00009855}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009856
Ellen Norris-Thompsone0dbedf2019-06-24 09:23:38 +01009857LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
9858 armnn::IWorkloadFactory& workloadFactory,
9859 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9860{
9861 return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedSymm16>(
9862 workloadFactory, memoryManager);
9863}
9864
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009865LayerTestResult<float, 4> DequantizeSimpleUint8Test(
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009866 armnn::IWorkloadFactory& workloadFactory,
9867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9868{
9869 return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9870}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009871
Nattapat Chaimanowongafa4e3a2019-04-02 11:41:45 +01009872LayerTestResult<float, 4> DequantizeOffsetUint8Test(
9873 armnn::IWorkloadFactory& workloadFactory,
9874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9875{
9876 return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9877}
9878
9879LayerTestResult<float, 4> DequantizeSimpleInt16Test(
9880 armnn::IWorkloadFactory& workloadFactory,
9881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9882{
9883 return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9884}
9885
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009886LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
9887 armnn::IWorkloadFactory& workloadFactory,
9888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9889{
9890 return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9891}
9892
9893LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
9894 armnn::IWorkloadFactory& workloadFactory,
9895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9896{
9897 return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
9898}
9899
9900LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
9901 armnn::IWorkloadFactory& workloadFactory,
9902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9903{
9904 return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
9905}
Aron Virginas-Tar735a4502019-06-26 15:02:47 +01009906
9907//
9908// TransposeConvolution2d
9909//
9910
9911// Simple biased
9912LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNchwTest(
9913 armnn::IWorkloadFactory& workloadFactory,
9914 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9915{
9916 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9917 workloadFactory,
9918 memoryManager,
9919 true,
9920 armnn::DataLayout::NCHW);
9921}
9922
9923LayerTestResult<float, 4> SimpleTransposeConvolution2dFloatNhwcTest(
9924 armnn::IWorkloadFactory& workloadFactory,
9925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9926{
9927 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9928 workloadFactory,
9929 memoryManager,
9930 true,
9931 armnn::DataLayout::NHWC);
9932}
9933
9934LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NchwTest(
9935 armnn::IWorkloadFactory& workloadFactory,
9936 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9937{
9938 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9939 workloadFactory,
9940 memoryManager,
9941 true,
9942 armnn::DataLayout::NCHW);
9943}
9944
9945LayerTestResult<uint8_t, 4> SimpleTransposeConvolution2dUint8NhwcTest(
9946 armnn::IWorkloadFactory& workloadFactory,
9947 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9948{
9949 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
9950 workloadFactory,
9951 memoryManager,
9952 true,
9953 armnn::DataLayout::NHWC);
9954}
9955
9956LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NchwTest(
9957 armnn::IWorkloadFactory& workloadFactory,
9958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9959{
9960 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9961 workloadFactory,
9962 memoryManager,
9963 true,
9964 armnn::DataLayout::NCHW);
9965}
9966
9967LayerTestResult<int16_t, 4> SimpleTransposeConvolution2dInt16NhwcTest(
9968 armnn::IWorkloadFactory& workloadFactory,
9969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9970{
9971 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
9972 workloadFactory,
9973 memoryManager,
9974 true,
9975 armnn::DataLayout::NHWC);
9976}
9977
9978// Simple unbiased
9979LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNchwTest(
9980 armnn::IWorkloadFactory& workloadFactory,
9981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9982{
9983 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9984 workloadFactory,
9985 memoryManager,
9986 false,
9987 armnn::DataLayout::NCHW);
9988}
9989
9990LayerTestResult<float, 4> UnbiasedSimpleTransposeConvolution2dFloatNhwcTest(
9991 armnn::IWorkloadFactory& workloadFactory,
9992 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9993{
9994 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
9995 workloadFactory,
9996 memoryManager,
9997 false,
9998 armnn::DataLayout::NHWC);
9999}
10000
10001LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NchwTest(
10002 armnn::IWorkloadFactory& workloadFactory,
10003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10004{
10005 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10006 workloadFactory,
10007 memoryManager,
10008 false,
10009 armnn::DataLayout::NCHW);
10010}
10011
10012LayerTestResult<uint8_t, 4> UnbiasedSimpleTransposeConvolution2dUint8NhwcTest(
10013 armnn::IWorkloadFactory& workloadFactory,
10014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10015{
10016 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10017 workloadFactory,
10018 memoryManager,
10019 false,
10020 armnn::DataLayout::NHWC);
10021}
10022
10023LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NchwTest(
10024 armnn::IWorkloadFactory& workloadFactory,
10025 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10026{
10027 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10028 workloadFactory,
10029 memoryManager,
10030 false,
10031 armnn::DataLayout::NCHW);
10032}
10033
10034LayerTestResult<int16_t, 4> UnbiasedSimpleTransposeConvolution2dInt16NhwcTest(
10035 armnn::IWorkloadFactory& workloadFactory,
10036 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10037{
10038 return SimpleTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10039 workloadFactory,
10040 memoryManager,
10041 false,
10042 armnn::DataLayout::NHWC);
10043}
10044
10045// Padded biased
10046LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNchwTest(
10047 armnn::IWorkloadFactory& workloadFactory,
10048 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10049{
10050 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10051 workloadFactory,
10052 memoryManager,
10053 true,
10054 armnn::DataLayout::NCHW);
10055}
10056
10057LayerTestResult<float, 4> PaddedTransposeConvolution2dFloatNhwcTest(
10058 armnn::IWorkloadFactory& workloadFactory,
10059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10060{
10061 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10062 workloadFactory,
10063 memoryManager,
10064 true,
10065 armnn::DataLayout::NHWC);
10066}
10067
10068LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NchwTest(
10069 armnn::IWorkloadFactory& workloadFactory,
10070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10071{
10072 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10073 workloadFactory,
10074 memoryManager,
10075 true,
10076 armnn::DataLayout::NCHW);
10077}
10078
10079LayerTestResult<uint8_t, 4> PaddedTransposeConvolution2dUint8NhwcTest(
10080 armnn::IWorkloadFactory& workloadFactory,
10081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10082{
10083 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10084 workloadFactory,
10085 memoryManager,
10086 true,
10087 armnn::DataLayout::NHWC);
10088}
10089
10090LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NchwTest(
10091 armnn::IWorkloadFactory& workloadFactory,
10092 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10093{
10094 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10095 workloadFactory,
10096 memoryManager,
10097 true,
10098 armnn::DataLayout::NCHW);
10099}
10100
10101LayerTestResult<int16_t, 4> PaddedTransposeConvolution2dInt16NhwcTest(
10102 armnn::IWorkloadFactory& workloadFactory,
10103 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10104{
10105 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10106 workloadFactory,
10107 memoryManager,
10108 true,
10109 armnn::DataLayout::NHWC);
10110}
10111
10112// Padded unbiased
10113LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNchwTest(
10114 armnn::IWorkloadFactory& workloadFactory,
10115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10116{
10117 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10118 workloadFactory,
10119 memoryManager,
10120 false,
10121 armnn::DataLayout::NCHW);
10122}
10123
10124LayerTestResult<float, 4> UnbiasedPaddedTransposeConvolution2dFloatNhwcTest(
10125 armnn::IWorkloadFactory& workloadFactory,
10126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10127{
10128 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10129 workloadFactory,
10130 memoryManager,
10131 false,
10132 armnn::DataLayout::NHWC);
10133}
10134
10135LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NchwTest(
10136 armnn::IWorkloadFactory& workloadFactory,
10137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10138{
10139 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10140 workloadFactory,
10141 memoryManager,
10142 false,
10143 armnn::DataLayout::NCHW);
10144}
10145
10146LayerTestResult<uint8_t, 4> UnbiasedPaddedTransposeConvolution2dUint8NhwcTest(
10147 armnn::IWorkloadFactory& workloadFactory,
10148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10149{
10150 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10151 workloadFactory,
10152 memoryManager,
10153 false,
10154 armnn::DataLayout::NHWC);
10155}
10156
10157LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NchwTest(
10158 armnn::IWorkloadFactory& workloadFactory,
10159 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10160{
10161 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10162 workloadFactory,
10163 memoryManager,
10164 false,
10165 armnn::DataLayout::NCHW);
10166}
10167
10168LayerTestResult<int16_t, 4> UnbiasedPaddedTransposeConvolution2dInt16NhwcTest(
10169 armnn::IWorkloadFactory& workloadFactory,
10170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10171{
10172 return PaddedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10173 workloadFactory,
10174 memoryManager,
10175 false,
10176 armnn::DataLayout::NHWC);
10177}
10178
10179// Strided biased
10180LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNchwTest(
10181 armnn::IWorkloadFactory& workloadFactory,
10182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10183{
10184 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10185 workloadFactory,
10186 memoryManager,
10187 true,
10188 armnn::DataLayout::NCHW);
10189}
10190
10191LayerTestResult<float, 4> StridedTransposeConvolution2dFloatNhwcTest(
10192 armnn::IWorkloadFactory& workloadFactory,
10193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10194{
10195 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10196 workloadFactory,
10197 memoryManager,
10198 true,
10199 armnn::DataLayout::NHWC);
10200}
10201
10202LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NchwTest(
10203 armnn::IWorkloadFactory& workloadFactory,
10204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10205{
10206 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10207 workloadFactory,
10208 memoryManager,
10209 true,
10210 armnn::DataLayout::NCHW);
10211}
10212
10213LayerTestResult<uint8_t, 4> StridedTransposeConvolution2dUint8NhwcTest(
10214 armnn::IWorkloadFactory& workloadFactory,
10215 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10216{
10217 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10218 workloadFactory,
10219 memoryManager,
10220 true,
10221 armnn::DataLayout::NHWC);
10222}
10223
10224LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NchwTest(
10225 armnn::IWorkloadFactory& workloadFactory,
10226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10227{
10228 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10229 workloadFactory,
10230 memoryManager,
10231 true,
10232 armnn::DataLayout::NCHW);
10233}
10234
10235LayerTestResult<int16_t, 4> StridedTransposeConvolution2dInt16NhwcTest(
10236 armnn::IWorkloadFactory& workloadFactory,
10237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10238{
10239 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10240 workloadFactory,
10241 memoryManager,
10242 true,
10243 armnn::DataLayout::NHWC);
10244}
10245
10246// Strided unbiased
10247LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNchwTest(
10248 armnn::IWorkloadFactory& workloadFactory,
10249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10250{
10251 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10252 workloadFactory,
10253 memoryManager,
10254 false,
10255 armnn::DataLayout::NCHW);
10256}
10257
10258LayerTestResult<float, 4> UnbiasedStridedTransposeConvolution2dFloatNhwcTest(
10259 armnn::IWorkloadFactory& workloadFactory,
10260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10261{
10262 return StridedTransposeConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
10263 workloadFactory,
10264 memoryManager,
10265 false,
10266 armnn::DataLayout::NHWC);
10267}
10268
10269LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NchwTest(
10270 armnn::IWorkloadFactory& workloadFactory,
10271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10272{
10273 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10274 workloadFactory,
10275 memoryManager,
10276 false,
10277 armnn::DataLayout::NCHW);
10278}
10279
10280LayerTestResult<uint8_t, 4> UnbiasedStridedTransposeConvolution2dUint8NhwcTest(
10281 armnn::IWorkloadFactory& workloadFactory,
10282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10283{
10284 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
10285 workloadFactory,
10286 memoryManager,
10287 false,
10288 armnn::DataLayout::NHWC);
10289}
10290
10291LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NchwTest(
10292 armnn::IWorkloadFactory& workloadFactory,
10293 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10294{
10295 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10296 workloadFactory,
10297 memoryManager,
10298 false,
10299 armnn::DataLayout::NCHW);
10300}
10301
10302LayerTestResult<int16_t, 4> UnbiasedStridedTransposeConvolution2dInt16NhwcTest(
10303 armnn::IWorkloadFactory& workloadFactory,
10304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
10305{
10306 return StridedTransposeConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
10307 workloadFactory,
10308 memoryManager,
10309 false,
10310 armnn::DataLayout::NHWC);
Narumol Prangnawarate6eaf662019-07-08 08:57:17 +010010311}