blob: de3c857399902d19e71101be408baf460b2889bd [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
Nina Drozd8ed4b8c2019-05-29 10:41:04 +010033#include "FloorTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000034#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Flattened [channel][row][col] (3 x 8 x 16):
//   channel 0: 0.5 everywhere, except the second row which is all zeros;
//   channel 1: all zeros, except a vertical line of 1s at column 2;
//   channel 2: -1 everywhere.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests (one bias value per output channel).
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Runs a Convolution2d workload over the shared 3-channel 16x8 image with a
// 2-output-channel 3x5 kernel and compares against precomputed reference output.
// ArmnnType is the input/weight data type; ArmnnBType is the bias data type.
// qScale/qOffset quantise the reference data when ArmnnType is a quantised type.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    // Layout is [outputChannel][inputChannel][kernelRow][kernelCol]: six 5x3 planes.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0, input channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Output channel 0, input channel 1 (ignores the vertical-line channel).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 0, input channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1, input channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 1, input channel 1 (picks up only the vertical line).
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Output channel 1, input channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // qScale quantises both input and weights above, so the bias scale is qScale * qScale.
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Runs a Convolution2d workload over the shared 3-channel 16x8 image with a
// 2-output-channel 3x3 kernel and compares against precomputed reference output.
// ArmnnType is the input/weight data type; ArmnnBType is the bias data type.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    // Layout is [outputChannel][inputChannel][kernelRow][kernelCol]: six 3x3 planes.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Output channel 0, input channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Output channel 0, input channel 1.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 0, input channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1, input channel 0.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 1, input channel 1 (picks up only the vertical line).
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Output channel 1, input channel 2.
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        // qScale quantises both input and weights above, so the bias scale is qScale * qScale.
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
// Runs a Convolution2d workload with a 3x3 kernel, 2x2 stride and 1-pixel padding
// on every side, over a 1-channel 5x5 NHWC input.
// NOTE(review): biasEnabled is accepted but an empty bias is always passed below;
// qScale/qOffset are only forwarded to the implementation (the reference data here
// is not run through QuantizedVector).
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
    {
        1, 5, 2, 3, 5,
        8, 7, 3, 6, 3,
        3, 3, 9, 1, 9,
        4, 1, 8, 1, 3,
        6, 8, 1, 9, 2
    });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    });

    // Expected output is a single-batch, 1 channel, 3x3 image
    // (5x5 input, 1-pixel padding all round, stride 2 -> 3x3 output).
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric 1-pixel padding with a 2x2 stride.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX  = 2;
    uint32_t strideY  = 2;

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(), // No bias.
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
// Exercises Convolution2d with asymmetric padding that is larger than half the
// kernel size (pad right 3 / bottom 4 against a 2x2 kernel), which some backends
// handle as a special case. Bias is always disabled here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale), // Bias disabled for this test.
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
// Exercises DepthwiseConvolution2d with asymmetric padding (left/top 1,
// right/bottom 2), depth multiplier 1 and a 2-channel 4x4 kernel on a
// 2-channel 5x5 input.
// NOTE(review): unlike the Conv2d helpers above, the reference data here is
// quantised with the TensorInfos' own quantization parameters (which are never
// set, so defaults apply); the qScale/qOffset arguments are only forwarded to
// the test implementation and to GetBias2.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            // Channel 1.
            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            // Kernel for channel 0 (descending 32..17).
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            // Kernel for channel 1 (descending 16..1).
            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,

            // Channel 1.
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
662
// Common body for the NHWC depthwise convolution tests: builds a fixed
// 1x5x5x2 (NHWC) input, a { 1, 2, 4, 4 } kernel and the matching
// pre-computed expected output, then delegates the actual run/compare to
// DepthwiseConvolution2dNhwcTestImpl.
// ArmnnType selects the tensor data type, ArmnnBType the bias data type;
// qScale/qOffset quantize the reference data for non-float types.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: N=1, H=5, W=5, C=2. Channel 0 holds 0..24 and channel 1 holds
    // 25..49, interleaved per-pixel as NHWC requires.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Kernel: shape { 1, 2, 4, 4 } — two 4x4 filter slices, one per input
    // channel (presumed [M, I, H, W] depthwise weight layout; verify against
    // DepthwiseConvolution2dNhwcTestImpl), filled with descending values 32..1.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output, pre-computed for stride 1x1 with the asymmetric
    // padding passed below (left/top = 1, right/bottom = 2); same 1x5x5x2
    // NHWC shape as the input.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), // bias (if enabled) uses scale qScale*qScale
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
770
// Common body for the dilated depthwise convolution NHWC tests: a 3x3 block
// of ones sits in the middle of a 9x9 zero input; with a 3x3 kernel and
// dilation 3 the sampled taps straddle that block, producing a 3x3 output of
// constant 5s. Delegates the run/compare to DepthwiseConvolution2dNhwcTestImpl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: 1x9x9x1 (NHWC), all zeros except a centred 3x3 block of ones.
    armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Single 3x3 kernel with values 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        })));

    // No padding, unit strides, dilation 3 in both spatial dimensions.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;
    uint32_t dilationX = 3;
    uint32_t dilationY = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            5, 5, 5,
            5, 5, 5,
            5, 5, 5
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), // bias (if enabled) uses scale qScale*qScale
        expectedOutput,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);

}
839
telsoa014fcda012018-03-09 14:13:49 +0000840LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000841Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
842 armnn::IWorkloadFactory& workloadFactory,
843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000844 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000845{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000846 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
847 <armnn::DataType::Float32, armnn::DataType::Float32>(
848 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000849}
850
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000851LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
852 armnn::IWorkloadFactory& workloadFactory,
853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000854 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000855{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000856 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000858}
859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000860LayerTestResult<float, 4> DepthwiseConvolution2dTest(
861 armnn::IWorkloadFactory& workloadFactory,
862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
863 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000864 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000865{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000866 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000868}
869
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000870LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
871 armnn::IWorkloadFactory& workloadFactory,
872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
873 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100874{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000875 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
876 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100877}
878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000879LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
880 armnn::IWorkloadFactory& workloadFactory,
881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
882 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000883 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000884{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000885 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000886 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000887}
888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000889LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
890 armnn::IWorkloadFactory& workloadFactory,
891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
892 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000893 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100894{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000895 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000896 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100897}
898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000899LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
900 armnn::IWorkloadFactory& workloadFactory,
901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
902 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000903 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000905 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000906 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000907}
908
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000909LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
910 armnn::IWorkloadFactory& workloadFactory,
911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
912 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000913 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000914{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000915 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000916 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000917}
918
Bruno Goncalves22972f02019-04-26 21:03:24 -0300919LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
920 armnn::IWorkloadFactory& workloadFactory,
921 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
922{
923 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
924 workloadFactory,
925 memoryManager,
926 0.f,
927 0,
928 false);
929}
930
Ruomei Yan88d44b82019-05-23 14:29:06 +0100931LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
932 armnn::IWorkloadFactory& workloadFactory,
933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
934 bool biasEnabled,
935 const armnn::DataLayout layout)
936{
937 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
938 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
939}
940
941LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
942 armnn::IWorkloadFactory& workloadFactory,
943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
944 bool biasEnabled,
945 const armnn::DataLayout layout)
946{
947 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
948 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
949}
950
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000951LayerTestResult<float, 4> Convolution1dTest(
952 armnn::IWorkloadFactory& workloadFactory,
953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
954 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000955{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000956 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
957 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000958}
959
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000960LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
961 armnn::IWorkloadFactory& workloadFactory,
962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
963 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000964{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000965 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
966 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000967}
968
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000969LayerTestResult<float,4> CompareConvolution2dTest(
970 armnn::IWorkloadFactory& workloadFactory,
971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
972 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000973{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000974 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
975 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000976}
977
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000978LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000979 armnn::IWorkloadFactory& workloadFactory,
980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
981 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000982 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000984 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
985 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000986}
987
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000988LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
989 armnn::IWorkloadFactory& workloadFactory,
990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
991 armnn::IWorkloadFactory& refWorkloadFactory,
992 const armnn::DataLayout layout)
993{
994 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
995 workloadFactory, memoryManager, refWorkloadFactory, layout);
996}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000997
998LayerTestResult<float,4> SimpleNormalizationAcrossTest(
999 armnn::IWorkloadFactory& workloadFactory,
1000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001001{
1002 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1003 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001004 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001005}
1006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001007LayerTestResult<float,4> SimpleNormalizationWithinTest(
1008 armnn::IWorkloadFactory& workloadFactory,
1009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001010{
1011 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1012 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001013 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001014}
1015
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001016LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1017 armnn::IWorkloadFactory& workloadFactory,
1018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001019{
1020 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1021 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001022 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001023}
1024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001025LayerTestResult<float,2> SimpleSoftmaxTest(
1026 armnn::IWorkloadFactory& workloadFactory,
1027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1028 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001029{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001030 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001031}
1032
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001033LayerTestResult<float,3> Simple3dSoftmaxTest(
1034 armnn::IWorkloadFactory& workloadFactory,
1035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1036 float beta)
1037{
1038 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1039}
1040
1041LayerTestResult<float,4> Simple4dSoftmaxTest(
1042 armnn::IWorkloadFactory& workloadFactory,
1043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1044 float beta)
1045{
1046 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1047}
1048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001049LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1050 armnn::IWorkloadFactory& workloadFactory,
1051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1052 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001053{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001054 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001055}
1056
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001057LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1058 armnn::IWorkloadFactory& workloadFactory,
1059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1060 float beta)
1061{
1062 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1063}
1064
1065LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1066 armnn::IWorkloadFactory& workloadFactory,
1067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1068 float beta)
1069{
1070 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1071}
1072
nikraj01248683f2019-05-29 16:46:50 +01001073LayerTestResult<int16_t,2> SimpleSoftmaxUint16Test(
1074 armnn::IWorkloadFactory& workloadFactory,
1075 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1076 float beta)
1077{
1078 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1079}
1080
1081LayerTestResult<int16_t,3> Simple3dSoftmaxUint16Test(
1082 armnn::IWorkloadFactory& workloadFactory,
1083 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1084 float beta)
1085{
1086 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1087}
1088
1089LayerTestResult<int16_t,4> Simple4dSoftmaxUint16Test(
1090 armnn::IWorkloadFactory& workloadFactory,
1091 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1092 float beta)
1093{
1094 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, beta);
1095}
1096
// Runs the same normalization (channel/method as given) on 'workloadFactory'
// and on the reference 'refWorkloadFactory' and compares the two results.
LayerTestResult<float,4> CompareNormalizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::NormalizationAlgorithmChannel normChannel,
    armnn::NormalizationAlgorithmMethod normMethod)
{
    return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
}
1106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001107LayerTestResult<float,2> CompareSoftmaxTest(
1108 armnn::IWorkloadFactory& workloadFactory,
1109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001110 armnn::IWorkloadFactory& refWorkloadFactory,
1111 float beta)
1112{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001113 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1114 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001115}
1116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001117LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1118 armnn::IWorkloadFactory& workloadFactory,
1119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001120 armnn::IWorkloadFactory& refWorkloadFactory,
1121 float beta)
1122{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001123 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1124 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001125}
1126
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001127std::vector<LayerTestResult<float,3>> SplitterTest(
1128 armnn::IWorkloadFactory& workloadFactory,
1129 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001130{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001131 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001132}
1133
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001134std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1135 armnn::IWorkloadFactory& workloadFactory,
1136 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001137{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001138 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001139}
1140
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001141LayerTestResult<float, 3> CopyViaSplitterTest(
1142 armnn::IWorkloadFactory& workloadFactory,
1143 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001144{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001145 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001146}
1147
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001148LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1149 armnn::IWorkloadFactory& workloadFactory,
1150 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001151{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001152 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001153}
1154
telsoa01c577f2c2018-08-31 09:22:23 +01001155LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001156 armnn::IWorkloadFactory& workloadFactory,
1157 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001158{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001159 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001160 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1161 { 2., 3., 3., 4. }));
1162
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001163 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001164 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1165 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1166 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001167 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001168 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001169}
1170
// LSTM (Float32) without CIFG, with peephole and projection: 2x5 input
// checked against a 2x16 golden output.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Batch size 2, input size 5.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Batch size 2, output size 16 — golden values.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1192
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001193LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1194 armnn::IWorkloadFactory& workloadFactory,
1195 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001196{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001197 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001198 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1199 {2., 3., 3., 4.}));
1200
1201
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001202 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001203 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1204 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1205 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1206
Conor Kennedyb9971c92019-05-07 07:14:23 +01001207 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001208 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001209}
1210
Conor Kennedyb9971c92019-05-07 07:14:23 +01001211LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1212 armnn::IWorkloadFactory& workloadFactory,
1213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1214{
1215 const float qScale = 1.0f;
1216 const int32_t qOffset = 0;
1217
1218 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1219 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1220
1221 armnn::TensorInfo inputDesc({2, 2}, datatype);
1222 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1223 std::vector<float>{2., 3., 3., 4.}));
1224
1225 armnn::TensorInfo outputDesc({2, 4}, datatype);
1226 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1227 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1228 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1229
1230 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1231 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1232
1233}
1234
1235LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1236 armnn::IWorkloadFactory& workloadFactory,
1237 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1238{
1239 const float qScale = 1.0f;
1240 const int32_t qOffset = 0;
1241
1242 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1243 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1244
1245 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
1246 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1247 std::vector<float>({ 2., 3., 3., 4. })));
1248
1249 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
1250 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1251 qOffset, std::vector<float>(
1252 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1253 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
1254
1255 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
1256 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1257}
1258
// LSTM with QSymm16 activations and QAsymm8 layer constants: no CIFG,
// peephole and projection enabled. 2x5 input against a 2x16 golden output,
// quantized with scale 2.0 / offset 0.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // Batch size 2, input size 5 — quantized from the float reference values.
    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Batch size 2, output size 16 — quantized golden values.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1288
1289LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
1290 armnn::IWorkloadFactory& workloadFactory,
1291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1292{
1293 const float qScale = 1.0f;
1294 const int32_t qOffset = 0;
1295
1296 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
1297
1298 armnn::TensorInfo inputDesc({2, 2}, datatype);
1299 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
1300 qOffset, std::vector<float>{2., 3., 3., 4.}));
1301
1302 armnn::TensorInfo outputDesc({2, 4}, datatype);
1303 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1304 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1305 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1306
1307 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1308 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
1309}
1310
// Concatenates a [2,6,3] tensor and a [1,6,3] tensor along the channel axis
// into a [3,6,3] Float32 output. Uses sub-tensor views of the output where the
// backend supports them, otherwise separate input tensors, and checks the
// copied-back result against the expected concatenation.
// Note: memoryManager is unused here; handles come from workloadFactory.
LayerTestResult<float,3> ConcatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected output: channels 0-1 come from input1, channel 2 from input2.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,

        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
    {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f,
        10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f,
        16.0f, 17.0f, 18.0f,

        19.0f, 20.0f, 21.0f,
        22.0f, 23.0f, 24.0f,
        25.0f, 26.0f, 27.0f,
        28.0f, 29.0f, 30.0f,
        31.0f, 32.0f, 33.0f,
        34.0f, 35.0f, 36.0f,
    })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
    {
        37.0f, 38.0f, 39.0f,
        40.0f, 41.0f, 42.0f,
        43.0f, 44.0f, 45.0f,
        46.0f, 47.0f, 48.0f,
        49.0f, 50.0f, 51.0f,
        52.0f, 53.0f, 54.0f,
    })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, i.e. just past input1's two channels.
    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are supported, the inputs are views directly into the
    // output tensor at the view origins; otherwise standalone tensors are used.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1433
// Element-wise addition of two Float32 tensors of identical shape [2,2,2,3];
// runs an Addition workload and returns actual vs expected output.
// Note: memoryManager is unused here; handles come from workloadFactory.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    // Both inputs and the output share the same shape and data type.
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    }));

    // Expected output is the element-wise sum of input1 and input2.
    LayerTestResult<float,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1525
// Shared implementation for the broadcast-addition tests: adds a [1,3,2,1]
// tensor to a [1,1,2,3] tensor (mutual broadcasting to [1,3,2,3]).
// qScale/qOffset are applied to all tensors only when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization info is only meaningful for quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Each of input1's six values is broadcast across input2's last dimension.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1604
// Shared implementation for the single-element broadcast-addition tests:
// adds a [1,1,1,1] scalar tensor to every element of a [1,3,2,3] tensor.
// qScale/qOffset are applied to all tensors only when T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    // Quantization info is only meaningful for quantized data types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: every element of input1 shifted by the broadcast 0.5.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001679LayerTestResult<float, 4> AdditionBroadcastTest(
1680 armnn::IWorkloadFactory& workloadFactory,
1681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001682{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001683 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1684 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001685}
1686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001687LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1688 armnn::IWorkloadFactory& workloadFactory,
1689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001690{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001691 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1692 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001693}
1694
Sadik Armagan2999a022019-04-09 14:20:12 +01001695LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1696 armnn::IWorkloadFactory& workloadFactory,
1697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1698{
1699 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1700 workloadFactory, memoryManager, 2.f, 0);
1701}
1702
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001703LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1704 armnn::IWorkloadFactory& workloadFactory,
1705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001706{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001707 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1708 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001709}
1710
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001711LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1712 armnn::IWorkloadFactory& workloadFactory,
1713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001714{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001715 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1716 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001717}
1718
Sadik Armagan2999a022019-04-09 14:20:12 +01001719LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1720 armnn::IWorkloadFactory& workloadFactory,
1721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1722{
1723 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1724 workloadFactory, memoryManager, 0.1333333f, 0);
1725}
1726
// Runs the same Addition workload on two factories (the backend under test
// and a reference backend) with identical random inputs; the reference result
// becomes outputExpected so the caller can compare the two backends.
// Note: memoryManager is unused here; handles come from the factories.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds keep the random inputs reproducible across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor reuses the same layout but swaps in the
    // reference factory's tensor handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1796
namespace {
// Shared implementation for all Division layer tests: builds two input
// tensors (each with its own shape and quantization parameters), runs a
// Division workload, and returns the actual output alongside outValues
// as the expected result.
// Note: memoryManager is unused here; handles come from workloadFactory.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    // Each tensor carries its own quantization parameters.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1861
// Exercises float division by signed zero: x/±0 is expected to produce
// ±infinity, 0/±0 and -0/±0 to produce NaN, and the final quad (5/5) to
// divide normally.
LayerTestResult<float,4> DivisionByZeroTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int width = 2;
    const unsigned int height = 2;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 2;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0({
        1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
       -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });

    // Divisors mix positive and negative zero to check the sign of infinity.
    std::vector<float> input1({
        0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
        0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });

    // NOTE(review): -NAN negates the quiet-NaN macro; this assumes the tensor
    // comparison treats any NaN as matching any NaN — confirm in the helper.
    std::vector<float> output({
        INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
       -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });

    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
                                                        memoryManager,
                                                        shape, input0, 1.0f, 0,
                                                        shape, input1, 1.0f, 0,
                                                        shape, output, 1.0f, 0);
}
1891
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001892LayerTestResult<float,4> DivisionTest(
1893 armnn::IWorkloadFactory& workloadFactory,
1894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001895{
1896 const unsigned int width = 2;
1897 const unsigned int height = 2;
1898 const unsigned int channelCount = 2;
1899 const unsigned int batchSize = 2;
1900
1901 unsigned int shape[] = { batchSize, channelCount, height, width };
1902
1903 std::vector<float> input0({
1904 2, 2, 2, 2, 3, 3, 3, 3,
1905 4, 4, 4, 4, 5, 5, 5, 5 });
1906
1907 std::vector<float> input1({
1908 1, 1, 1, 1, 2, 2, 2, 2,
1909 4, 4, 4, 4, 4, 4, 4, 4 });
1910
1911 std::vector<float> output({
1912 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1913 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1914
David Beck5cd01f32018-09-12 16:00:08 +01001915
Sadik Armagan2999a022019-04-09 14:20:12 +01001916 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1917 memoryManager,
1918 shape, input0, 1.0f, 0,
1919 shape, input1, 1.0f, 0,
1920 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001921}
1922
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001923LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1924 armnn::IWorkloadFactory& workloadFactory,
1925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001926{
1927 unsigned int shape0[] = { 1, 2, 2, 2 };
1928 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1929
1930 unsigned int shape1[] = { 1, 1, 1, 1 };
1931 std::vector<float> input1({ 2 });
1932
1933 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1934
David Beck5cd01f32018-09-12 16:00:08 +01001935
Sadik Armagan2999a022019-04-09 14:20:12 +01001936 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1937 memoryManager,
1938 shape0, input0, 1.0f, 0,
1939 shape1, input1, 1.0f, 0,
1940 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001941}
1942
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001943LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1944 armnn::IWorkloadFactory& workloadFactory,
1945 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001946{
1947 unsigned int shape0[] = { 1, 3, 3, 2 };
1948 std::vector<float> input0({
1949 1, 4, 3, 8, 5, 12,
1950 7, 16, 9, 20, 11, 24,
1951 13, 28, 15, 32, 17, 36});
1952
1953 unsigned int shape1[] = { 1, 1, 1, 2 };
1954 std::vector<float> input1({ 1, 2 });
1955
1956 std::vector<float> output({
1957 1, 2, 3, 4, 5, 6,
1958 7, 8, 9, 10, 11, 12,
1959 13, 14, 15, 16, 17, 18});
1960
Sadik Armagan2999a022019-04-09 14:20:12 +01001961 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1962 memoryManager,
1963 shape0, input0, 1.0f, 0,
1964 shape1, input1, 1.0f, 0,
1965 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001966}
1967
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001968LayerTestResult<uint8_t,4> DivisionUint8Test(
1969 armnn::IWorkloadFactory& workloadFactory,
1970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001971{
1972 const unsigned int width = 2;
1973 const unsigned int height = 2;
1974 const unsigned int channelCount = 2;
1975 const unsigned int batchSize = 2;
1976
1977 unsigned int shape[] = { batchSize, channelCount, height, width };
1978
1979 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1980 4, 4, 4, 4, 5, 5, 5, 5 });
1981
1982 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1983 4, 4, 4, 4, 4, 4, 4, 4 });
1984
1985 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1986 4, 4, 4, 4, 5, 5, 5, 5});
1987
1988
Sadik Armagan2999a022019-04-09 14:20:12 +01001989 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1990 memoryManager,
1991 shape, input0, 1.0f, 0,
1992 shape, input1, 1.0f, 0,
1993 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001994}
1995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001996LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1997 armnn::IWorkloadFactory& workloadFactory,
1998 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001999{
2000 unsigned int shape0[] = { 1, 2, 2, 2 };
2001 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2002
2003 unsigned int shape1[] = { 1, 1, 1, 1 };
2004 std::vector<uint8_t> input1({ 2 });
2005
2006 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2007
Sadik Armagan2999a022019-04-09 14:20:12 +01002008 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2009 memoryManager,
2010 shape0, input0, 1.0f, 0,
2011 shape1, input1, 1.0f, 0,
2012 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01002013}
2014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002015LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
2016 armnn::IWorkloadFactory& workloadFactory,
2017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01002018{
2019 unsigned int shape0[] = { 1, 3, 3, 2 };
2020 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
2021 7, 16, 9, 20, 11, 24,
2022 13, 28, 15, 32, 17, 36});
2023
2024 unsigned int shape1[] = { 1, 1, 1, 2 };
2025 std::vector<uint8_t> input1({ 1, 2 });
2026
2027 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2028 7, 8, 9, 10, 11, 12,
2029 13, 14, 15, 16, 17, 18});
2030
Sadik Armagan2999a022019-04-09 14:20:12 +01002031 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2032 memoryManager,
2033 shape0, input0, 1.0f, 0,
2034 shape1, input1, 1.0f, 0,
2035 shape0, output, 1.0f, 0);
2036}
2037
2038LayerTestResult<int16_t,4> DivisionInt16Test(
2039 armnn::IWorkloadFactory& workloadFactory,
2040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2041{
2042 unsigned int shape[] = { 2, 2, 2, 2 };
2043
2044 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2045 4, 4, 4, 4, 5, 5, 5, 5 });
2046
2047 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2048 4, 4, 4, 4, 4, 4, 4, 4 });
2049
2050 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2051 4, 4, 4, 4, 5, 5, 5, 5});
2052
2053
2054 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2055 memoryManager,
2056 shape, input0, 1.0f, 0,
2057 shape, input1, 1.0f, 0,
2058 shape, output, 0.25f, 0);
2059}
2060
2061LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2062 armnn::IWorkloadFactory& workloadFactory,
2063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2064{
2065 unsigned int shape0[] = { 1, 2, 2, 2 };
2066 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2067
2068 unsigned int shape1[] = { 1, 1, 1, 1 };
2069 std::vector<int16_t> input1({ 2 });
2070
2071 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2072
2073 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2074 memoryManager,
2075 shape0, input0, 1.0f, 0,
2076 shape1, input1, 1.0f, 0,
2077 shape0, output, 1.0f, 0);
2078}
2079
2080LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2081 armnn::IWorkloadFactory& workloadFactory,
2082 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2083{
2084 unsigned int shape0[] = { 1, 3, 3, 2 };
2085 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2086 7, 16, 9, 20, 11, 24,
2087 13, 28, 15, 32, 17, 36});
2088
2089 unsigned int shape1[] = { 1, 1, 1, 2 };
2090 std::vector<int16_t> input1({ 1, 2 });
2091
2092 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2093 7, 8, 9, 10, 11, 12,
2094 13, 14, 15, 16, 17, 18});
2095
2096 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2097 memoryManager,
2098 shape0, input0, 1.0f, 0,
2099 shape1, input1, 1.0f, 0,
2100 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002101}
2102
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002103template<typename DescriptorType>
2104std::unique_ptr<armnn::IWorkload> CreateWorkload(
2105 const armnn::IWorkloadFactory& workloadFactory,
2106 const armnn::WorkloadInfo& info,
2107 const DescriptorType& descriptor)
2108{
2109 return CreateWorkload(workloadFactory, info, descriptor);
2110};
2111
2112template<>
2113std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
2114 const armnn::IWorkloadFactory& workloadFactory,
2115 const armnn::WorkloadInfo& info,
2116 const armnn::MaximumQueueDescriptor& descriptor)
2117{
2118 return workloadFactory.CreateMaximum(descriptor, info);
2119}
2120
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002121template<>
2122std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
2123 const armnn::IWorkloadFactory& workloadFactory,
2124 const armnn::WorkloadInfo& info,
2125 const armnn::MinimumQueueDescriptor& descriptor)
2126{
2127 return workloadFactory.CreateMinimum(descriptor, info);
2128}
2129
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002130template<>
2131std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
2132 const armnn::IWorkloadFactory& workloadFactory,
2133 const armnn::WorkloadInfo& info,
2134 const armnn::EqualQueueDescriptor& descriptor)
2135{
2136 return workloadFactory.CreateEqual(descriptor, info);
2137}
2138
FrancisMurtagh878f0232018-12-19 10:56:15 +00002139template<>
2140std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
2141 const armnn::IWorkloadFactory& workloadFactory,
2142 const armnn::WorkloadInfo& info,
2143 const armnn::GreaterQueueDescriptor& descriptor)
2144{
2145 return workloadFactory.CreateGreater(descriptor, info);
2146}
2147
namespace {

// Generic driver for binary element-wise layer tests (Maximum, Minimum,
// Equal, Greater, ...). Builds two input tensors and an output tensor,
// creates the workload selected by `Descriptor` (via the CreateWorkload
// specialisations above), runs it, and returns both the actual and the
// expected output for comparison by the caller.
//
// Template parameters:
//   Descriptor      - queue descriptor type selecting the operation.
//   ArmnnTypeInput  - armnn::DataType of both inputs.
//   ArmnnTypeOutput - armnn::DataType of the output (Boolean for comparison
//                     ops such as Equal/Greater).
//   TInput/TOutput  - C++ element types resolved from the DataTypes.
//
// qScale/qOffset are applied to all three tensor infos, but only when the
// input element type is a quantised type.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    // All tensors in these tests are rank-4 (NCHW-style shapes).
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    // Input tensors are built from the raw values before quantisation info is
    // attached; MakeTensor only needs the shape here.
    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantisation parameters only make sense for quantised element types;
    // for float inputs the defaults (0.0f scale) are left untouched.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Comparison ops produce Boolean output; tell the result object to use
    // boolean comparison semantics when checking actual vs expected.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensor infos/handles into the descriptor and create the
    // operation-specific workload via the specialised CreateWorkload.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Order matters below: handles must be allocated before data is copied
    // in, and PostAllocationConfigure must run before execution.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for ops whose output type equals the input type
// (Maximum, Minimum, ...): forwards with ArmnnT for both type slots.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
2239
2240LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002242{
2243 const unsigned int width = 2;
2244 const unsigned int height = 2;
2245 const unsigned int channelCount = 2;
2246 const unsigned int batchSize = 2;
2247
2248 unsigned int shape[] = { batchSize, channelCount, height, width };
2249
2250 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2251 3, 3, 3, 3, 4, 4, 4, 4 });
2252
2253 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2254 5, 5, 5, 5, 4, 4, 4, 4 });
2255
kevmay012b4d88e2019-01-24 14:05:09 +00002256 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2257 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002258
kevmay012b4d88e2019-01-24 14:05:09 +00002259 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002260 workloadFactory,
2261 memoryManager,
2262 shape,
2263 input0,
2264 shape,
2265 input1,
2266 shape,
2267 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002268}
2269
kevmay012b4d88e2019-01-24 14:05:09 +00002270LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002271 armnn::IWorkloadFactory& workloadFactory,
2272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2273{
2274 unsigned int shape0[] = { 1, 2, 2, 2 };
2275 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2276
2277 unsigned int shape1[] = { 1, 1, 1, 1 };
2278 std::vector<float> input1({ 1 });
2279
kevmay012b4d88e2019-01-24 14:05:09 +00002280 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002281
kevmay012b4d88e2019-01-24 14:05:09 +00002282 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002283 workloadFactory,
2284 memoryManager,
2285 shape0,
2286 input0,
2287 shape1,
2288 input1,
2289 shape0,
2290 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002291}
2292
kevmay012b4d88e2019-01-24 14:05:09 +00002293LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002294 armnn::IWorkloadFactory& workloadFactory,
2295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2296{
2297 const unsigned int shape0[] = { 1, 2, 2, 3 };
2298 const unsigned int shape1[] = { 1, 1, 1, 3 };
2299
2300 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2301 7, 8, 9, 10, 11, 12 });
2302
2303 std::vector<float> input1({ 1, 2, 3});
2304
kevmay012b4d88e2019-01-24 14:05:09 +00002305 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2306 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002307
kevmay012b4d88e2019-01-24 14:05:09 +00002308 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002309 workloadFactory,
2310 memoryManager,
2311 shape0,
2312 input0,
2313 shape1,
2314 input1,
2315 shape0,
2316 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002317}
2318
2319LayerTestResult<uint8_t, 4> EqualUint8Test(
2320 armnn::IWorkloadFactory& workloadFactory,
2321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2322{
2323 unsigned int shape[] = { 2, 2, 2, 2 };
2324
2325 // See dequantized values to the right.
2326 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002327 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002328
2329 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2330 3, 3, 3, 3, 5, 5, 5, 5 });
2331
2332 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2333 1, 1, 1, 1, 0, 0, 0, 0 });
2334
kevmay012b4d88e2019-01-24 14:05:09 +00002335 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2336 armnn::DataType::QuantisedAsymm8,
2337 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002338 workloadFactory,
2339 memoryManager,
2340 shape,
2341 input0,
2342 shape,
2343 input1,
2344 shape,
2345 output,
2346 1.0f,
2347 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002348}
2349
2350LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2351 armnn::IWorkloadFactory& workloadFactory,
2352 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2353{
2354 const unsigned int shape0[] = { 1, 2, 2, 3 };
2355 const unsigned int shape1[] = { 1, 1, 1, 1 };
2356
2357 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2358 7, 8, 9, 10, 11, 12 });
2359
2360 std::vector<uint8_t> input1({ 1 });
2361
2362 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2363 0, 0, 0, 0, 0, 0 });
2364
kevmay012b4d88e2019-01-24 14:05:09 +00002365 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2366 armnn::DataType::QuantisedAsymm8,
2367 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002368 workloadFactory,
2369 memoryManager,
2370 shape0,
2371 input0,
2372 shape1,
2373 input1,
2374 shape0,
2375 output,
2376 1.0f,
2377 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002378}
2379
2380LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2381 armnn::IWorkloadFactory& workloadFactory,
2382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2383{
2384 const unsigned int shape0[] = { 1, 2, 2, 3 };
2385 const unsigned int shape1[] = { 1, 1, 1, 3 };
2386
2387 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2388 7, 8, 9, 10, 11, 12 });
2389
2390 std::vector<uint8_t> input1({ 1, 1, 3});
2391
2392 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2393 0, 0, 0, 0, 0, 0 });
2394
kevmay012b4d88e2019-01-24 14:05:09 +00002395 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2396 armnn::DataType::QuantisedAsymm8,
2397 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002398 workloadFactory,
2399 memoryManager,
2400 shape0,
2401 input0,
2402 shape1,
2403 input1,
2404 shape0,
2405 output,
2406 1.0f,
2407 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002408}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002409
kevmay012b4d88e2019-01-24 14:05:09 +00002410LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2412{
2413 const unsigned int width = 2;
2414 const unsigned int height = 2;
2415 const unsigned int channelCount = 2;
2416 const unsigned int batchSize = 2;
2417
2418 unsigned int shape[] = { batchSize, channelCount, height, width };
2419
2420 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2421 3, 3, 3, 3, 4, 4, 4, 4 });
2422
2423 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2424 5, 5, 5, 5, 4, 4, 4, 4 });
2425
kevmay012b4d88e2019-01-24 14:05:09 +00002426 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2427 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002428
kevmay012b4d88e2019-01-24 14:05:09 +00002429 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002430 workloadFactory,
2431 memoryManager,
2432 shape,
2433 input0,
2434 shape,
2435 input1,
2436 shape,
2437 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002438}
2439
kevmay012b4d88e2019-01-24 14:05:09 +00002440LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002441 armnn::IWorkloadFactory& workloadFactory,
2442 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2443{
2444 unsigned int shape0[] = { 1, 2, 2, 2 };
2445 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2446
2447 unsigned int shape1[] = { 1, 1, 1, 1 };
2448 std::vector<float> input1({ 1 });
2449
kevmay012b4d88e2019-01-24 14:05:09 +00002450 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002451
kevmay012b4d88e2019-01-24 14:05:09 +00002452 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002453 workloadFactory,
2454 memoryManager,
2455 shape0,
2456 input0,
2457 shape1,
2458 input1,
2459 shape0,
2460 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002461}
2462
kevmay012b4d88e2019-01-24 14:05:09 +00002463LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002464 armnn::IWorkloadFactory& workloadFactory,
2465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2466{
2467 const unsigned int shape0[] = { 1, 2, 2, 3 };
2468 const unsigned int shape1[] = { 1, 1, 1, 3 };
2469
2470 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2471 7, 8, 9, 10, 11, 12 });
2472
2473 std::vector<float> input1({ 1, 3, 2});
2474
kevmay012b4d88e2019-01-24 14:05:09 +00002475 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2476 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002477
kevmay012b4d88e2019-01-24 14:05:09 +00002478 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002479 workloadFactory,
2480 memoryManager,
2481 shape0,
2482 input0,
2483 shape1,
2484 input1,
2485 shape0,
2486 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002487}
2488
2489LayerTestResult<uint8_t, 4> GreaterUint8Test(
2490 armnn::IWorkloadFactory& workloadFactory,
2491 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2492{
2493 unsigned int shape[] = { 2, 2, 2, 2 };
2494
2495 // See dequantized values to the right.
2496 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2497 3, 3, 3, 3, 5, 5, 5, 5 });
2498
2499 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2500 2, 2, 2, 2, 5, 5, 5, 5 });
2501
2502 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2503 1, 1, 1, 1, 0, 0, 0, 0 });
2504
kevmay012b4d88e2019-01-24 14:05:09 +00002505 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2506 armnn::DataType::QuantisedAsymm8,
2507 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002508 workloadFactory,
2509 memoryManager,
2510 shape,
2511 input0,
2512 shape,
2513 input1,
2514 shape,
2515 output,
2516 1.0f,
2517 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002518}
2519
2520LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2521 armnn::IWorkloadFactory& workloadFactory,
2522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2523{
2524 const unsigned int shape0[] = { 1, 2, 2, 3 };
2525 const unsigned int shape1[] = { 1, 1, 1, 1 };
2526
2527 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2528 7, 8, 9, 10, 11, 12 });
2529
2530 std::vector<uint8_t> input1({ 1 });
2531
2532 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2533 1, 1, 1, 1, 1, 1 });
2534
kevmay012b4d88e2019-01-24 14:05:09 +00002535 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2536 armnn::DataType::QuantisedAsymm8,
2537 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002538 workloadFactory,
2539 memoryManager,
2540 shape0,
2541 input0,
2542 shape1,
2543 input1,
2544 shape0,
2545 output,
2546 1.0f,
2547 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002548}
2549
2550LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2551 armnn::IWorkloadFactory& workloadFactory,
2552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2553{
2554 const unsigned int shape0[] = { 1, 2, 2, 3 };
2555 const unsigned int shape1[] = { 1, 1, 1, 3 };
2556
2557 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2558 7, 8, 9, 10, 11, 12 });
2559
2560 std::vector<uint8_t> input1({ 1, 1, 3});
2561
2562 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2563 1, 1, 1, 1, 1, 1 });
2564
kevmay012b4d88e2019-01-24 14:05:09 +00002565 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2566 armnn::DataType::QuantisedAsymm8,
2567 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002568 workloadFactory,
2569 memoryManager,
2570 shape0,
2571 input0,
2572 shape1,
2573 input1,
2574 shape0,
2575 output,
2576 1.0f,
2577 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002578}
2579
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002580LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2581 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2582{
2583 const unsigned int width = 2;
2584 const unsigned int height = 2;
2585 const unsigned int channelCount = 2;
2586 const unsigned int batchSize = 2;
2587
2588 unsigned int shape[] = { batchSize, channelCount, height, width };
2589
2590 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2591 3, 3, 3, 3, 4, 4, 4, 4 });
2592
2593 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2594 4, 4, 4, 4, 5, 5, 5, 5 });
2595
2596 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2597 4, 4, 4, 4, 5, 5, 5, 5 });
2598
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002599 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2600 workloadFactory,
2601 memoryManager,
2602 shape,
2603 input0,
2604 shape,
2605 input1,
2606 shape,
2607 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002608}
2609
2610LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2611 armnn::IWorkloadFactory& workloadFactory,
2612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2613{
2614 unsigned int shape0[] = { 1, 2, 2, 2 };
2615 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2616
2617 unsigned int shape1[] = { 1, 1, 1, 1 };
2618 std::vector<float> input1({ 2 });
2619
2620 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2621
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002622 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2623 workloadFactory,
2624 memoryManager,
2625 shape0,
2626 input0,
2627 shape1,
2628 input1,
2629 shape0,
2630 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002631}
2632
2633LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2634 armnn::IWorkloadFactory& workloadFactory,
2635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2636{
2637 const unsigned int shape0[] = { 1, 2, 2, 3 };
2638 const unsigned int shape1[] = { 1, 1, 1, 3 };
2639
2640 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2641 7, 8, 9, 10, 11, 12 });
2642
2643 std::vector<float> input1({ 1, 2, 3});
2644
2645 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002646 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002647
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002648 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2649 workloadFactory,
2650 memoryManager,
2651 shape0,
2652 input0,
2653 shape1,
2654 input1,
2655 shape0,
2656 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002657}
2658
2659LayerTestResult<uint8_t, 4> MaximumUint8Test(
2660 armnn::IWorkloadFactory& workloadFactory,
2661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2662{
2663 unsigned int shape[] = { 2, 2, 2, 2 };
2664
2665 // See dequantized values to the right.
2666 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2667 3, 3, 3, 3, 4, 4, 4, 4 });
2668
2669 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2670 4, 4, 4, 4, 5, 5, 5, 5 });
2671
2672 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2673 4, 4, 4, 4, 5, 5, 5, 5 });
2674
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002675 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2676 workloadFactory,
2677 memoryManager,
2678 shape,
2679 input0,
2680 shape,
2681 input1,
2682 shape,
2683 output,
2684 1.0f,
2685 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002686}
2687
2688LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2689 armnn::IWorkloadFactory& workloadFactory,
2690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2691{
2692 const unsigned int shape0[] = { 1, 2, 2, 3 };
2693 const unsigned int shape1[] = { 1, 1, 1, 1 };
2694
2695 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2696 7, 8, 9, 10, 11, 12 });
2697
2698 std::vector<uint8_t> input1({2});
2699
2700 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2701 7, 8, 9, 10, 11, 12 });
2702
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002703 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2704 workloadFactory,
2705 memoryManager,
2706 shape0,
2707 input0,
2708 shape1,
2709 input1,
2710 shape0,
2711 output,
2712 1.0f,
2713 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002714}
2715
2716LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2717 armnn::IWorkloadFactory& workloadFactory,
2718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2719{
2720 const unsigned int shape0[] = { 1, 2, 2, 3 };
2721 const unsigned int shape1[] = { 1, 1, 1, 3 };
2722
2723 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2724 7, 8, 9, 10, 11, 12 });
2725
2726 std::vector<uint8_t> input1({ 1, 10, 3});
2727
2728 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2729 7, 10, 9, 10, 11, 12 });
2730
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002731 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2732 workloadFactory,
2733 memoryManager,
2734 shape0,
2735 input0,
2736 shape1,
2737 input1,
2738 shape0,
2739 output,
2740 1.0f,
2741 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002742}
2743
Sadik Armagan2999a022019-04-09 14:20:12 +01002744LayerTestResult<int16_t, 4> MaximumInt16Test(
2745 armnn::IWorkloadFactory& workloadFactory,
2746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2747{
2748 unsigned int shape[] = { 2, 2, 2, 2 };
2749
2750 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2751 3, 3, 3, 3, 4, 4, 4, 4 });
2752
2753 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2754 4, 4, 4, 4, 5, 5, 5, 5 });
2755
2756 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2757 4, 4, 4, 4, 5, 5, 5, 5 });
2758
2759 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2760 workloadFactory,
2761 memoryManager,
2762 shape,
2763 input0,
2764 shape,
2765 input1,
2766 shape,
2767 output,
2768 1.0f,
2769 0);
2770}
2771
2772LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2773 armnn::IWorkloadFactory& workloadFactory,
2774 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2775{
2776 const unsigned int shape0[] = { 1, 2, 2, 3 };
2777 const unsigned int shape1[] = { 1, 1, 1, 1 };
2778
2779 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2780 7, 8, 9, 10, 11, 12 });
2781
2782 std::vector<int16_t> input1({2});
2783
2784 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2785 7, 8, 9, 10, 11, 12 });
2786
2787 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2788 workloadFactory,
2789 memoryManager,
2790 shape0,
2791 input0,
2792 shape1,
2793 input1,
2794 shape0,
2795 output,
2796 1.0f,
2797 0);
2798}
2799
2800LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2801 armnn::IWorkloadFactory& workloadFactory,
2802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2803{
2804 const unsigned int shape0[] = { 1, 2, 2, 3 };
2805 const unsigned int shape1[] = { 1, 1, 1, 3 };
2806
2807 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2808 7, 8, 9, 10, 11, 12 });
2809
2810 std::vector<int16_t> input1({ 1, 10, 3});
2811
2812 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2813 7, 10, 9, 10, 11, 12 });
2814
2815 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2816 workloadFactory,
2817 memoryManager,
2818 shape0,
2819 input0,
2820 shape1,
2821 input1,
2822 shape0,
2823 output,
2824 1.0f,
2825 0);
2826}
2827
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002828LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2829 armnn::IWorkloadFactory& workloadFactory,
2830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2831{
2832 unsigned int shape0[] = { 1, 2, 2, 2 };
2833 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2834
2835 unsigned int shape1[] = { 1, 1, 1, 1 };
2836 std::vector<float> input1({ 2 });
2837
2838 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2839
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002840 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2841 workloadFactory,
2842 memoryManager,
2843 shape0,
2844 input0,
2845 shape1,
2846 input1,
2847 shape0,
2848 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002849}
2850
2851
2852LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2853 armnn::IWorkloadFactory& workloadFactory,
2854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2855{
2856 unsigned int shape0[] = { 1, 2, 2, 2 };
2857 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2858
2859 unsigned int shape1[] = { 1, 1, 1, 1 };
2860 std::vector<float> input1({ 5 });
2861
2862 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2863
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002864 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2865 workloadFactory,
2866 memoryManager,
2867 shape0,
2868 input0,
2869 shape1,
2870 input1,
2871 shape0,
2872 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002873}
2874
2875LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2876 armnn::IWorkloadFactory & workloadFactory,
2877 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2878{
2879 const unsigned int shape0[] = { 1, 2, 2, 3 };
2880 const unsigned int shape1[] = { 1, 1, 1, 3 };
2881
2882 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2883 7, 1, 2, 3, 4, 5 });
2884
2885 std::vector<uint8_t> input1({ 1, 2, 3});
2886
2887 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2888 1, 1, 2, 1, 2, 3 });
2889
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002890 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2891 workloadFactory,
2892 memoryManager,
2893 shape0,
2894 input0,
2895 shape1,
2896 input1,
2897 shape0,
2898 output,
2899 1.0f,
2900 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002901}
2902
Sadik Armagan2999a022019-04-09 14:20:12 +01002903LayerTestResult<int16_t, 4> MinimumInt16Test(
2904 armnn::IWorkloadFactory& workloadFactory,
2905 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2906{
2907 unsigned int shape[] = { 2, 2, 2, 2 };
2908
2909 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2910 3, 3, 3, 3, 4, 4, 4, 4 });
2911
2912 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2913 4, 4, 4, 4, 5, 5, 5, 5 });
2914
2915 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2916 3, 3, 3, 3, 4, 4, 4, 4 });
2917
2918 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2919 workloadFactory,
2920 memoryManager,
2921 shape,
2922 input0,
2923 shape,
2924 input1,
2925 shape,
2926 output,
2927 1.0f,
2928 0);
2929}
2930
2931LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2932 armnn::IWorkloadFactory& workloadFactory,
2933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2934{
2935 const unsigned int shape0[] = { 1, 2, 2, 3 };
2936 const unsigned int shape1[] = { 1, 1, 1, 1 };
2937
2938 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2939 7, 8, 9, 10, 11, 12 });
2940
2941 std::vector<int16_t> input1({2});
2942
2943 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2944 2, 2, 2, 2, 2, 2 });
2945
2946 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2947 workloadFactory,
2948 memoryManager,
2949 shape0,
2950 input0,
2951 shape1,
2952 input1,
2953 shape0,
2954 output,
2955 1.0f,
2956 0);
2957}
2958
2959LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2960 armnn::IWorkloadFactory& workloadFactory,
2961 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2962{
2963 const unsigned int shape0[] = { 1, 2, 2, 3 };
2964 const unsigned int shape1[] = { 1, 1, 1, 3 };
2965
2966 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2967 7, 8, 9, 10, 11, 12 });
2968
2969 std::vector<int16_t> input1({ 1, 10, 3});
2970
2971 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2972 1, 8, 3, 1, 10, 3 });
2973
2974 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2975 workloadFactory,
2976 memoryManager,
2977 shape0,
2978 input0,
2979 shape1,
2980 input1,
2981 shape0,
2982 output,
2983 1.0f,
2984 0);
2985}
2986
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002987namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002988LayerTestResult<float,4> MultiplicationTestHelper(
2989 armnn::IWorkloadFactory& workloadFactory,
2990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2991 const unsigned int shape0[4],
2992 const std::vector<float> & values0,
2993 const unsigned int shape1[4],
2994 const std::vector<float> & values1,
2995 const unsigned int outShape[4],
2996 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00002997{
surmeh01bceff2f2018-03-29 16:29:27 +01002998 const size_t dimensionCount = 4;
2999 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
3000 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
3001 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00003002
surmeh01bceff2f2018-03-29 16:29:27 +01003003 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
3004 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00003005
3006 LayerTestResult<float,4> ret(outputTensorInfo);
3007
3008 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3009 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3010 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3011
3012 armnn::MultiplicationQueueDescriptor data;
3013 armnn::WorkloadInfo info;
3014 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3015 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3016 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3017
3018 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3019
3020 inputHandle0->Allocate();
3021 inputHandle1->Allocate();
3022 outputHandle->Allocate();
3023
3024 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3025 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3026
Derek Lambertif30f7d32019-04-09 10:25:02 +01003027 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003028 workload->Execute();
3029
3030 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3031
surmeh01bceff2f2018-03-29 16:29:27 +01003032 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00003033 return ret;
3034}
surmeh01bceff2f2018-03-29 16:29:27 +01003035} // anonymous namespace
3036
3037
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003038LayerTestResult<float,4> MultiplicationTest(
3039 armnn::IWorkloadFactory& workloadFactory,
3040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003041{
3042 const unsigned int width = 2;
3043 const unsigned int height = 2;
3044 const unsigned int channelCount = 2;
3045 const unsigned int batchSize = 2;
3046
3047 unsigned int shape[] = { batchSize, channelCount, height, width };
3048
3049 std::vector<float> input0({
3050 1, 1, 1, 1, 2, 2, 2, 2,
3051 3, 3, 3, 3, 4, 4, 4, 4 });
3052
3053 std::vector<float> input1({
3054 2, 2, 2, 2, 3, 3, 3, 3,
3055 4, 4, 4, 4, 5, 5, 5, 5 });
3056
3057 std::vector<float> output({
3058 2, 2, 2, 2, 6, 6, 6, 6,
3059 12, 12, 12, 12, 20, 20, 20, 20 });
3060
3061 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003062 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003063 shape,
3064 input0,
3065 shape,
3066 input1,
3067 shape,
3068 output);
3069}
3070
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003071LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3072 armnn::IWorkloadFactory& workloadFactory,
3073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003074{
3075 unsigned int shape0[] = { 1, 2, 2, 2 };
3076 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3077
3078 unsigned int shape1[] = { 1, 1, 1, 1 };
3079 std::vector<float> input1({ 2 });
3080
3081 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3082
3083 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003084 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003085 shape0,
3086 input0,
3087 shape1,
3088 input1,
3089 shape0,
3090 output);
3091}
3092
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003093LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3094 armnn::IWorkloadFactory& workloadFactory,
3095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003096{
3097 unsigned int shape0[] = { 1, 3, 3, 2 };
3098 std::vector<float> input0({
3099 1, 2, 3, 4, 5, 6,
3100 7, 8, 9, 10, 11, 12,
3101 13, 14, 15, 16, 17, 18});
3102
3103 unsigned int shape1[] = { 1, 1, 1, 2 };
3104 std::vector<float> input1({ 1, 2 });
3105
3106 std::vector<float> output({
3107 1, 4, 3, 8, 5, 12,
3108 7, 16, 9, 20, 11, 24,
3109 13, 28, 15, 32, 17, 36});
3110
3111 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003112 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003113 shape0,
3114 input0,
3115 shape1,
3116 input1,
3117 shape0,
3118 output);
3119}
telsoa014fcda012018-03-09 14:13:49 +00003120
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003121LayerTestResult<float,4> CompareMultiplicationTest(
3122 armnn::IWorkloadFactory& workloadFactory,
3123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3124 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003125{
3126 const unsigned int width = 16;
3127 const unsigned int height = 32;
3128 const unsigned int channelCount = 2;
3129 const unsigned int batchSize = 5;
3130
3131 armnn::TensorInfo inputTensorInfo0;
3132 armnn::TensorInfo inputTensorInfo1;
3133 armnn::TensorInfo outputTensorInfo;
3134
3135 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
3136
3137 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3138 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3139 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3140
3141 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
3142
3143 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
3144 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
3145
3146 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
3147 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
3148 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3149
3150 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
3151 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
3152 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
3153
3154 armnn::MultiplicationQueueDescriptor data;
3155 armnn::WorkloadInfo info;
3156 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
3157 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
3158 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3159
3160 armnn::MultiplicationQueueDescriptor refData = data;
3161 armnn::WorkloadInfo refInfo = info;
3162 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
3163 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
3164 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3165
3166 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
3167 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
3168
3169 inputHandle0->Allocate();
3170 inputHandle1->Allocate();
3171 outputHandle->Allocate();
3172 inputHandle0Ref->Allocate();
3173 inputHandle1Ref->Allocate();
3174 outputHandleRef->Allocate();
3175
3176 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
3177 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
3178 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
3179 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
3180
Derek Lambertif30f7d32019-04-09 10:25:02 +01003181 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003182 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01003183 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003184 workloadRef->Execute();
telsoa014fcda012018-03-09 14:13:49 +00003185 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
3186 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
3187
3188 return comparisonResult;
3189}
3190
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003191LayerTestResult<float,4> CompareBatchNormTest(
3192 armnn::IWorkloadFactory& workloadFactory,
3193 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3194 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00003195{
3196 const unsigned int width = 2;
3197 const unsigned int height = 3;
3198 const unsigned int channels = 5;
3199 const unsigned int batchSize = 3;
3200
3201 armnn::TensorInfo inputTensorInfo;
3202 armnn::TensorInfo outputTensorInfo;
3203 armnn::TensorInfo tensorInfo;
3204
3205 constexpr unsigned int shape[] = {batchSize, channels, height, width};
3206 constexpr unsigned int tensorShape[] = {channels};
3207
3208 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3209 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
3210 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
3211
3212 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
3213
3214 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
3215 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
3216 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
3217 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
3218
3219 LayerTestResult<float,4> ret(outputTensorInfo);
3220
3221 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3222 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3223
3224 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
3225 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
3226
3227 armnn::BatchNormalizationQueueDescriptor data;
3228 armnn::WorkloadInfo info;
3229 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
3230 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
3231 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
3232 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
3233
3234 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
3235 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
3236 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
3237 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
3238
3239 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
3240 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3241 data.m_Mean = &meanTensor;
3242 data.m_Variance = &varianceTensor;
3243 data.m_Beta = &betaTensor;
3244 data.m_Gamma = &gammaTensor;
3245 data.m_Parameters.m_Eps = 0.01f;
3246
3247 armnn::BatchNormalizationQueueDescriptor refData = data;
3248 armnn::WorkloadInfo refInfo = info;
3249 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
3250 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3251
3252 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
3253 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
3254
3255 inputHandle->Allocate();
3256 outputHandle->Allocate();
3257 inputHandleRef->Allocate();
3258 outputHandleRef->Allocate();
3259
3260 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
3261 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
3262
Derek Lambertif30f7d32019-04-09 10:25:02 +01003263 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003264 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01003265 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003266 workloadRef->Execute();
3267
3268 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
3269 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
3270
3271 return ret;
3272}
3273
surmeh013537c2c2018-05-18 16:31:43 +01003274template<typename T>
3275void PermuteTensorData(
3276 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003277 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003278 const armnn::PermutationVector& mappings,
3279 armnn::TensorInfo & inputTensorInfo,
3280 const T * inputData,
3281 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00003282{
surmeh013537c2c2018-05-18 16:31:43 +01003283 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
3284 if (inputData == nullptr)
3285 {
3286 // Nullptr is an error in the test. By returning without doing the concatenation
3287 // I expect the caller to fail the test. It still makes sense to report this as
3288 // an assert for Debug builds.
3289 return;
3290 }
telsoa014fcda012018-03-09 14:13:49 +00003291
surmeh013537c2c2018-05-18 16:31:43 +01003292 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
3293
3294 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
3295 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3296
3297 armnn::PermuteQueueDescriptor queueDescriptor;
3298 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
3299 armnn::WorkloadInfo workloadInfo;
3300 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
3301 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3302
3303 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
3304
3305 inputHandle->Allocate();
3306 outputHandle->Allocate();
3307
3308 CopyDataToITensorHandle(inputHandle.get(), inputData);
3309
Derek Lambertif30f7d32019-04-09 10:25:02 +01003310 workload->PostAllocationConfigure();
surmeh013537c2c2018-05-18 16:31:43 +01003311 workload->Execute();
3312
3313 outputData.resize(outputTensorInfo.GetNumElements());
3314 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
3315 inputTensorInfo = outputTensorInfo;
3316}
3317
Jim Flynn825af452019-05-20 12:49:28 +01003318armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003319 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3320 unsigned int concatDim)
3321{
telsoa014fcda012018-03-09 14:13:49 +00003322 std::vector<armnn::TensorShape> shapes;
3323 shapes.reserve(inputTensorInfos.size());
3324 for (const armnn::TensorInfo& it: inputTensorInfos)
3325 {
3326 shapes.push_back(it.GetShape());
3327 }
surmeh013537c2c2018-05-18 16:31:43 +01003328
Jim Flynn825af452019-05-20 12:49:28 +01003329 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3330 shapes.end(),
3331 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003332}
3333
//
// Concatenation is only supported for the N and C dimensions of NCHW tensors and for the
// innermost dimension. For tensors with fewer than 4 dimensions we need to make sure that
// the concatenation dimension is either the 3rd slowest iterating one or the innermost one.
//
3339
3340bool NeedPermuteForConcat(
3341 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3342 unsigned int concatDim)
3343{
3344 // See note above. Additionally we expect the input shapes to have the
3345 // same number of dimensions.
3346 unsigned int nDimensions = 0;
3347
telsoa01c577f2c2018-08-31 09:22:23 +01003348 // Determine the number of dimensions as well as sanity check them
3349 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01003350 for (auto && tensorInfo : inputTensorInfos)
3351 {
3352 if (!nDimensions)
3353 {
3354 nDimensions = tensorInfo.GetShape().GetNumDimensions();
3355 }
3356 else
3357 {
3358 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
3359 "Input shapes must have the same number of dimensions");
3360 }
3361 }
3362
narpra015cdda352018-11-19 15:30:27 +00003363 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01003364}
3365
3366armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3367{
3368 unsigned int numDims = inputShape.GetNumDimensions();
3369 if (numDims >= 3)
3370 {
3371 // Nothing to do if the inputShape has at least 3 dimensions.
3372 return inputShape;
3373 }
3374
3375 std::vector<unsigned int> newDims(size_t(3), 1u);
3376 unsigned int expandedBy = 3 - numDims;
3377 for (unsigned int i=0; i<numDims; ++i)
3378 {
3379 newDims[expandedBy+i] = inputShape[i];
3380 }
3381 return armnn::TensorShape(3u, &newDims[0]);
3382}
3383
3384void Generate3dPermuteVectorForConcat(
3385 unsigned int numDimensions,
3386 unsigned int & concatDim,
3387 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3388{
3389 BOOST_ASSERT_MSG(numDimensions <= 3,
3390 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003391 unsigned int expandedBy = 3 - numDimensions;
3392 unsigned int expandedConcatAxis = concatDim + expandedBy;
3393
3394 if (expandedConcatAxis == 2)
3395 {
3396 concatDim = 0;
3397 armnn::PermutationVector forwardPermutation({1, 2, 0});
3398 armnn::PermutationVector reversePermutation({2, 0, 1});
3399 permutations = std::make_pair(forwardPermutation, reversePermutation);
3400 }
3401 else if (expandedConcatAxis == 1)
3402 {
3403 concatDim = 0;
3404 armnn::PermutationVector forwardPermutation({2, 0, 1});
3405 armnn::PermutationVector reversePermutation({1, 2, 0});
3406 permutations = std::make_pair(forwardPermutation, reversePermutation);
3407 }
3408 else
3409 {
3410 BOOST_ASSERT(expandedConcatAxis == 0);
3411 concatDim = 0;
3412 }
3413}
3414
3415//
3416// Permute the input tensors so we can do a supported concatenation.
3417// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3418// at the front. Finally this function tells what the output shape
3419// of the permuted concatenated tensor is going to be.
3420//
3421template <typename T>
3422void PermuteInputsForConcat(
3423 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003425 std::vector<armnn::TensorInfo> & inputTensorInfos,
3426 std::vector<T *> & inputData,
3427 std::vector<std::vector<T>> & inputDataStorage,
3428 armnn::PermutationVector & permuteVector,
3429 unsigned int & concatDim,
3430 armnn::TensorInfo & outputTensorInfo)
3431{
3432 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
3433 "Expecting more than one tensor to be concatenated here");
3434
3435 unsigned int numDims = 0;
3436 unsigned int nthInput = 0;
3437 const armnn::PermutationVector identity({0, 1, 2});
3438
3439 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
3440 std::make_pair(identity, identity);
3441
3442 inputDataStorage.resize(inputData.size());
3443
3444 for (auto && tensorInfo : inputTensorInfos)
3445 {
3446 if (numDims == 0)
3447 {
3448 numDims = tensorInfo.GetShape().GetNumDimensions();
3449 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
narpra015cdda352018-11-19 15:30:27 +00003450
telsoa01c577f2c2018-08-31 09:22:23 +01003451 // Store the reverese permutation.
surmeh013537c2c2018-05-18 16:31:43 +01003452 permuteVector = permutations.second;
3453 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
3454 "Test logic error, we don't need permutation, so we shouldn't arrive here");
3455 }
3456 else
3457 {
3458 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
3459 "All inputs must have the same number of dimensions");
3460 }
3461
3462 armnn::TensorInfo newTensorInfo = tensorInfo;
3463 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
3464
3465 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003466 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003467 permutations.first,
3468 newTensorInfo,
3469 inputData[nthInput],
3470 inputDataStorage[nthInput]);
3471
3472 inputData[nthInput] = inputDataStorage[nthInput].data();
3473 inputTensorInfos[nthInput] = newTensorInfo;
3474
3475 ++nthInput;
3476 }
3477
3478 outputTensorInfo.SetShape(
3479 armnnUtils::Permuted(
3480 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
3481 permutations.first));
3482}
3483
3484
3485//
3486// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003487// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003488// output.
3489//
3490template <typename T>
3491void PermuteOutputForConcat(
3492 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003494 const armnn::TensorInfo & tensorInfo,
3495 const armnn::PermutationVector & permuteVector,
3496 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
3497 T * data)
3498{
3499 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
3500 if (data == nullptr)
3501 {
3502 // Nullptr is an error in the test. By returning without doing the permutation
3503 // I expect the caller to fail the test. It still makes sense to report this as
3504 // an assert for Debug builds.
3505 return;
3506 }
3507
3508 armnn::TensorInfo resultTensorInfo = tensorInfo;
3509 std::vector<T> inputData(tensorInfo.GetNumElements());
3510 std::vector<T> outputData;
3511
3512 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
3513
3514 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003515 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003516 permuteVector,
3517 resultTensorInfo,
3518 &inputData[0],
3519 outputData);
3520
3521 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
3522}
3523
3524template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003525void Concatenate(
3526 armnn::IWorkloadFactory& workloadFactory,
3527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3528 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3529 std::initializer_list<T *> inputsOrig,
3530 const armnn::TensorInfo& outputTensorInfoOrig,
3531 T * output,
narpra015cdda352018-11-19 15:30:27 +00003532 unsigned int concatDim,
3533 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003534{
3535 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3536 if (output == nullptr)
3537 {
3538 // Nullptr is an error in the test. By returning without doing the permutation
3539 // I expect the caller to fail the test. It still makes sense to report this as
3540 // an assert for Debug builds.
3541 return;
3542 }
3543
telsoa01c577f2c2018-08-31 09:22:23 +01003544 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003545 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3546 std::vector<T *> inputs = inputsOrig;
3547 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3548
3549 armnn::PermutationVector permuteVector{0, 1, 2};
3550
telsoa01c577f2c2018-08-31 09:22:23 +01003551 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003552 std::vector<std::vector<T>> tmpInputDataStorage;
3553
3554 const size_t inputCount = inputTensorInfos.size();
3555
3556 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3557
3558 if (needPermuteForConcat)
3559 {
3560 //
3561 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003562 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003563 //
3564 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003565 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003566 inputTensorInfos,
3567 inputs,
3568 tmpInputDataStorage,
3569 permuteVector,
3570 concatDim,
3571 outputTensorInfo);
3572 }
3573
narpra015cdda352018-11-19 15:30:27 +00003574 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003575
3576 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3577 inputHandles.reserve(inputCount);
3578
narpra015cdda352018-11-19 15:30:27 +00003579 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3580
Jim Flynne242f2d2019-05-22 14:24:13 +01003581 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01003582 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00003583 queueDescriptor.m_Parameters = viewsDescriptor;
3584
3585 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003586 {
narpra015cdda352018-11-19 15:30:27 +00003587 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3588 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3589 {
3590 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3591 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3592 }
telsoa014fcda012018-03-09 14:13:49 +00003593
narpra015cdda352018-11-19 15:30:27 +00003594 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003595
narpra015cdda352018-11-19 15:30:27 +00003596 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3597 for (unsigned int i = 0; i < inputCount; ++i)
3598 {
3599 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3600 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3601 subTensorsSupported ?
3602 workloadFactory.CreateSubTensorHandle(*outputHandle,
3603 inputTensorInfo.GetShape(),
3604 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3605 workloadFactory.CreateTensorHandle(inputTensorInfo);
3606
3607 inputHandles.emplace_back(std::move(inputHandle));
3608 }
3609
telsoa014fcda012018-03-09 14:13:49 +00003610 }
narpra015cdda352018-11-19 15:30:27 +00003611 else
3612 {
3613 for (unsigned int i = 0; i < inputCount; ++i)
3614 {
3615 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3616 inputHandles.emplace_back(std::move(inputHandle));
3617 }
3618 }
telsoa014fcda012018-03-09 14:13:49 +00003619
3620 for (unsigned int i = 0; i < inputCount; ++i)
3621 {
surmeh013537c2c2018-05-18 16:31:43 +01003622 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003623 }
3624
3625 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3626
Jim Flynn4ed6c832019-05-20 11:02:46 +01003627 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00003628
3629 for (auto& inputHandle : inputHandles)
3630 {
3631 inputHandle->Allocate();
3632 }
3633
3634 outputHandle->Allocate();
3635
3636 unsigned int nextInputId = 0;
3637 for (auto& inputHandle : inputHandles)
3638 {
surmeh013537c2c2018-05-18 16:31:43 +01003639 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3640 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003641 }
3642
Derek Lambertif30f7d32019-04-09 10:25:02 +01003643 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003644 workload->Execute();
3645
surmeh013537c2c2018-05-18 16:31:43 +01003646 if (needPermuteForConcat)
3647 {
3648 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003649 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003650 outputTensorInfo,
3651 permuteVector,
3652 std::move(outputHandle),
3653 output);
3654 }
3655 else
3656 {
3657 CopyDataFromITensorHandle(output, outputHandle.get());
3658 }
telsoa014fcda012018-03-09 14:13:49 +00003659}
3660
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003661template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003662LayerTestResult<T, 1> Concatenation1dTestImpl(
3663 armnn::IWorkloadFactory& workloadFactory,
3664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3665 float qScale,
3666 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003667{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003668 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003669
3670 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3671 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3672 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3673
Jim Flynncbb66aa2019-05-15 13:03:54 +01003674 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003675
3676 LayerTestResult<T, 1> result(outputTensorInfo);
3677
3678 std::vector<T> output;
3679 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003680 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003681 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3682 { input0.data(), input1.data(), input2.data() },
3683 outputTensorInfo,
3684 output.data(),
3685 0,
3686 true);
telsoa014fcda012018-03-09 14:13:49 +00003687
3688 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3689 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3690 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3691 }));
3692
3693 return result;
3694}
3695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003696LayerTestResult<float, 1> Concatenation1dTest(
3697 armnn::IWorkloadFactory& workloadFactory,
3698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003699{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003700 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003701}
3702
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003703template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003704LayerTestResult<T, 2> Concatenation2dTestImpl(
3705 armnn::IWorkloadFactory& workloadFactory,
3706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003707 const armnn::TensorInfo& outputTensorInfo,
3708 unsigned int dimension,
3709 const float qScale,
3710 const int32_t qOffset)
3711{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003712 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003713
3714 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3715 // Batch 0
3716 1.0f, 2.0f, 3.0f,
3717
3718 // Batch 1
3719 10.0f, 11.0f, 12.0f,
3720 }));
3721
3722 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3723 // Batch 0
3724 4.0f, 5.0f, 6.0f,
3725
3726 // Batch 1
3727 13.0f, 14.0f, 15.0f,
3728 }));
3729
3730 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3731 // Batch 0
3732 7.0f, 8.0f, 9.0f,
3733
3734 // Batch 1
3735 16.0f, 17.0f, 18.0f,
3736 }));
3737
3738 LayerTestResult<T, 2> result(outputTensorInfo);
3739
3740 std::vector<T> output;
3741 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003742 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003743 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3744 { input0.data(), input1.data(), input2.data() },
3745 outputTensorInfo,
3746 output.data(),
3747 dimension,
3748 true);
telsoa014fcda012018-03-09 14:13:49 +00003749
3750 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3751 return result;
3752}
3753
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003754template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003755LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
3756 armnn::IWorkloadFactory& workloadFactory,
3757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3758 float qScale,
3759 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003760{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003761 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003762
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003763 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3764 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
3765
telsoa014fcda012018-03-09 14:13:49 +00003766 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3767 // Batch 0
3768 1.0f, 2.0f, 3.0f,
3769
3770 // Batch 1
3771 10.0f, 11.0f, 12.0f,
3772
3773 // Batch 2
3774 4.0f, 5.0f, 6.0f,
3775
3776 // Batch 3
3777 13.0f, 14.0f, 15.0f,
3778
3779 // Batch 4
3780 7.0f, 8.0f, 9.0f,
3781
3782 // Batch 5
3783 16.0f, 17.0f, 18.0f,
3784 }));
3785
3786 return result;
3787}
3788
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003789LayerTestResult<float, 2> Concatenation2dDim0Test(
3790 armnn::IWorkloadFactory& workloadFactory,
3791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003792{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003793 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003794}
3795
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003796template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003797LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
3798 armnn::IWorkloadFactory& workloadFactory,
3799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3800 float qScale,
3801 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003802{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003803 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003804
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003805 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3806 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
3807
telsoa014fcda012018-03-09 14:13:49 +00003808 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3809 // Batch 0
3810 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3811
3812 // Batch 1
3813 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
3814 }));
3815
3816 return result;
3817}
3818
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003819LayerTestResult<float, 2> Concatenation2dDim1Test(
3820 armnn::IWorkloadFactory& workloadFactory,
3821 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003822{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003823 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003824}
3825
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003826template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003827LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
3828 armnn::IWorkloadFactory& workloadFactory,
3829 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3830 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003831 int32_t qOffset)
3832{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003833 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003834 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3835 // Batch 0
3836 1.0f, 2.0f, 3.0f,
3837
3838 // Batch 1
3839 10.0f, 11.0f, 12.0f,
3840 }));
3841
Jim Flynncbb66aa2019-05-15 13:03:54 +01003842 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003843 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3844 // Batch 0
3845 4.0f, 5.0f, 6.0f,
3846
3847 // Batch 1
3848 13.0f, 14.0f, 15.0f,
3849
3850 // Batch 0
3851 7.0f, 8.0f, 9.0f,
3852 }));
3853
Jim Flynncbb66aa2019-05-15 13:03:54 +01003854 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003855 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3856 // Batch 1
3857 16.0f, 17.0f, 18.0f,
3858 }));
3859
Jim Flynncbb66aa2019-05-15 13:03:54 +01003860 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003861 LayerTestResult<T, 2> result(outputTensorInfo);
3862
3863 std::vector<T> output;
3864 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003865 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003866 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3867 { input0.data(), input1.data(), input2.data() },
3868 outputTensorInfo,
3869 output.data(),
3870 0,
3871 true);
telsoa014fcda012018-03-09 14:13:49 +00003872
3873 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3874 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3875 // Batch 0
3876 1.0f, 2.0f, 3.0f,
3877
3878 // Batch 1
3879 10.0f, 11.0f, 12.0f,
3880
3881 // Batch 2
3882 4.0f, 5.0f, 6.0f,
3883
3884 // Batch 3
3885 13.0f, 14.0f, 15.0f,
3886
3887 // Batch 4
3888 7.0f, 8.0f, 9.0f,
3889
3890 // Batch 5
3891 16.0f, 17.0f, 18.0f,
3892 }));
3893
3894 return result;
3895}
3896
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003897LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3898 armnn::IWorkloadFactory& workloadFactory,
3899 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003900{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003901 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3902 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003903}
3904
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003905template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003906LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
3907 armnn::IWorkloadFactory& workloadFactory,
3908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3909 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003910 int32_t qOffset)
3911{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003912 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003913 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3914 // Batch 0
3915 1.0f, 2.0f, 3.0f,
3916
3917 // Batch 1
3918 10.0f, 11.0f, 12.0f,
3919 }));
3920
Jim Flynncbb66aa2019-05-15 13:03:54 +01003921 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003922 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3923 // Batch 0
3924 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
3925
3926 // Batch 1
3927 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
3928 }));
3929
Jim Flynncbb66aa2019-05-15 13:03:54 +01003930 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003931 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3932 // Batch 0
3933 9.0f,
3934
3935 // Batch 1
3936 18.0f
3937 }));
3938
Jim Flynncbb66aa2019-05-15 13:03:54 +01003939 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003940 LayerTestResult<T, 2> result(outputTensorInfo);
3941
3942 std::vector<T> output;
3943 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003944 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003945 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3946 { input0.data(), input1.data(), input2.data() },
3947 outputTensorInfo,
3948 output.data(),
3949 1,
3950 true);
telsoa014fcda012018-03-09 14:13:49 +00003951
3952 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3953 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3954 // Batch 0
3955 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3956
3957 // Batch 1
3958 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
3959 }));
3960
3961 return result;
3962}
3963
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003964LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3965 armnn::IWorkloadFactory& workloadFactory,
3966 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003967{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003968 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3969 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003970}
3971
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003972template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003973LayerTestResult<T, 3> Concatenation3dTestImpl(
3974 armnn::IWorkloadFactory& workloadFactory,
3975 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003976 const armnn::TensorInfo& outputTensorInfo,
3977 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00003978 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00003979 float qScale,
3980 int32_t qOffset)
3981{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003982 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003983
3984 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3985 // Batch 0, Channel 0
3986 1.0f, 2.0f,
3987
3988 // Batch 0, Channel 1
3989 3.0f, 4.0f,
3990
3991 // Batch 0, Channel 2
3992 5.0f, 6.0f,
3993
3994 // Batch 1, Channel 0
3995 19.0f, 20.0f,
3996
3997 // Batch 1, Channel 1
3998 21.0f, 22.0f,
3999
4000 // Batch 1, Channel 2
4001 23.0f, 24.0f
4002 }));
4003
4004 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4005 // Batch 0, Channel 0
4006 7.0f, 8.0f,
4007
4008 // Batch 0, Channel 1
4009 9.0f, 10.0f,
4010
4011 // Batch 0, Channel 2
4012 11.0f, 12.0f,
4013
4014 // Batch 1, Channel 0
4015 25.0f, 26.0f,
4016
4017 // Batch 1, Channel 1
4018 27.0f, 28.0f,
4019
4020 // Batch 1, Channel 2
4021 29.0f, 30.0f
4022 }));
4023
4024 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4025 // Batch 0, Channel 0
4026 13.0f, 14.0f,
4027
4028 // Batch 0, Channel 1
4029 15.0f, 16.0f,
4030
4031 // Batch 0, Channel 2
4032 17.0f, 18.0f,
4033
4034 // Batch 1, Channel 0
4035 31.0f, 32.0f,
4036
4037 // Batch 1, Channel 1
4038 33.0f, 34.0f,
4039
4040 // Batch 1, Channel 2
4041 35.0f, 36.0f
4042 }));
4043
4044 LayerTestResult<T, 3> result(outputTensorInfo);
4045
4046 std::vector<T> output;
4047 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004048 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004049 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4050 { input0.data(), input1.data(), input2.data() },
4051 outputTensorInfo,
4052 output.data(),
4053 dimension,
4054 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004055
4056 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4057 return result;
4058}
4059
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004060template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004061LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
4062 armnn::IWorkloadFactory& workloadFactory,
4063 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4064 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004065 int32_t qOffset)
4066{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004067 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004068
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004069 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4070 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4071
telsoa014fcda012018-03-09 14:13:49 +00004072 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4073 // Batch 0, Channel 0
4074 1.0f, 2.0f,
4075
4076 // Batch 0, Channel 1
4077 3.0f, 4.0f,
4078
4079 // Batch 0, Channel 2
4080 5.0f, 6.0f,
4081
4082 // Batch 1, Channel 0
4083 19.0f, 20.0f,
4084
4085 // Batch 1, Channel 1
4086 21.0f, 22.0f,
4087
4088 // Batch 1, Channel 2
4089 23.0f, 24.0f,
4090
4091 // Batch 2, Channel 0
4092 7.0f, 8.0f,
4093
4094 // Batch 2, Channel 1
4095 9.0f, 10.0f,
4096
4097 // Batch 2, Channel 2
4098 11.0f, 12.0f,
4099
4100 // Batch 3, Channel 0
4101 25.0f, 26.0f,
4102
4103 // Batch 3, Channel 1
4104 27.0f, 28.0f,
4105
4106 // Batch 3, Channel 2
4107 29.0f, 30.0f,
4108
4109 // Batch 4, Channel 0
4110 13.0f, 14.0f,
4111
4112 // Batch 4, Channel 1
4113 15.0f, 16.0f,
4114
4115 // Batch 4, Channel 2
4116 17.0f, 18.0f,
4117
4118 // Batch 5, Channel 0
4119 31.0f, 32.0f,
4120
4121 // Batch 5, Channel 1
4122 33.0f, 34.0f,
4123
4124 // Batch 5, Channel 2
4125 35.0f, 36.0f
4126 }));
narpra015cdda352018-11-19 15:30:27 +00004127
telsoa014fcda012018-03-09 14:13:49 +00004128 return result;
4129}
4130
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004131LayerTestResult<float, 3> Concatenation3dDim0Test(
4132 armnn::IWorkloadFactory& workloadFactory,
4133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004134{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004135 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004136}
4137
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004138template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004139LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
4140 armnn::IWorkloadFactory& workloadFactory,
4141 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4142 float qScale,
4143 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004144{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004145 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004146
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004147 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4148 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004149
telsoa014fcda012018-03-09 14:13:49 +00004150 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4151 // Batch 0, Channel 0
4152 1.0f, 2.0f,
4153
4154 // Batch 0, Channel 1
4155 3.0f, 4.0f,
4156
4157 // Batch 0, Channel 2
4158 5.0f, 6.0f,
4159
4160 // Batch 0, Channel 3
4161 7.0f, 8.0f,
4162
4163 // Batch 0, Channel 4
4164 9.0f, 10.0f,
4165
4166 // Batch 0, Channel 5
4167 11.0f, 12.0f,
4168
4169 // Batch 0, Channel 6
4170 13.0f, 14.0f,
4171
4172 // Batch 0, Channel 7
4173 15.0f, 16.0f,
4174
4175 // Batch 0, Channel 8
4176 17.0f, 18.0f,
4177
4178 // Batch 1, Channel 0
4179 19.0f, 20.0f,
4180
4181 // Batch 1, Channel 1
4182 21.0f, 22.0f,
4183
4184 // Batch 1, Channel 2
4185 23.0f, 24.0f,
4186
4187 // Batch 1, Channel 3
4188 25.0f, 26.0f,
4189
4190 // Batch 1, Channel 4
4191 27.0f, 28.0f,
4192
4193 // Batch 1, Channel 5
4194 29.0f, 30.0f,
4195
4196 // Batch 1, Channel 6
4197 31.0f, 32.0f,
4198
4199 // Batch 1, Channel 7
4200 33.0f, 34.0f,
4201
4202 // Batch 1, Channel 8
4203 35.0f, 36.0f
4204 }));
4205
4206 return result;
4207}
4208
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004209LayerTestResult<float, 3> Concatenation3dDim1Test(
4210 armnn::IWorkloadFactory& workloadFactory,
4211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004212{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004213 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004214}
4215
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004216template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004217LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
4218 armnn::IWorkloadFactory& workloadFactory,
4219 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004220 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004221 float qScale,
4222 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004223{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004224 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004225
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004226 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4227 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004228
telsoa014fcda012018-03-09 14:13:49 +00004229 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4230 // Batch 0, Channel 0
4231 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
4232
4233 // Batch 0, Channel 1
4234 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
4235
4236 // Batch 0, Channel 2
4237 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
4238
4239 // Batch 1, Channel 0
4240 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
4241
4242 // Batch 1, Channel 1
4243 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
4244
4245 // Batch 1, Channel 2
4246 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
4247 }));
4248
4249 return result;
4250}
4251
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004252LayerTestResult<float, 3> Concatenation3dDim2Test(
4253 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004254 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4255 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004256{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004257 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4258 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004259}
4260
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004261template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004262LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4263 armnn::IWorkloadFactory& workloadFactory,
4264 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4265 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004266 int32_t qOffset)
4267{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004268 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004269 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4270 // Batch 0, Channel 0
4271 1.0f, 2.0f,
4272
4273 // Batch 0, Channel 1
4274 3.0f, 4.0f,
4275
4276 // Batch 0, Channel 2
4277 5.0f, 6.0f,
4278
4279 // Batch 1, Channel 0
4280 19.0f, 20.0f,
4281
4282 // Batch 1, Channel 1
4283 21.0f, 22.0f,
4284
4285 // Batch 1, Channel 2
4286 23.0f, 24.0f
4287 }));
4288
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004289 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004290 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4291 // Batch 0, Channel 0
4292 7.0f, 8.0f,
4293
4294 // Batch 0, Channel 1
4295 9.0f, 10.0f,
4296
4297 // Batch 0, Channel 2
4298 11.0f, 12.0f,
4299 }));
4300
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004301 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004302 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4303 // Batch 0, Channel 0
4304 25.0f, 26.0f,
4305
4306 // Batch 0, Channel 1
4307 27.0f, 28.0f,
4308
4309 // Batch 0, Channel 2
4310 29.0f, 30.0f,
4311
4312 // Batch 1, Channel 0
4313 13.0f, 14.0f,
4314
4315 // Batch 1, Channel 1
4316 15.0f, 16.0f,
4317
4318 // Batch 1, Channel 2
4319 17.0f, 18.0f,
4320
4321 // Batch 2, Channel 0
4322 31.0f, 32.0f,
4323
4324 // Batch 2, Channel 1
4325 33.0f, 34.0f,
4326
4327 // Batch 2, Channel 2
4328 35.0f, 36.0f
4329 }));
4330
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004331 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004332 LayerTestResult<T, 3> result(outputTensorInfo);
4333
4334 std::vector<T> output;
4335 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004336 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004337 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4338 { input0.data(), input1.data(), input2.data() },
4339 outputTensorInfo,
4340 output.data(),
4341 0,
4342 true);
telsoa014fcda012018-03-09 14:13:49 +00004343
4344 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4345 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4346 // Batch 0, Channel 0
4347 1.0f, 2.0f,
4348
4349 // Batch 0, Channel 1
4350 3.0f, 4.0f,
4351
4352 // Batch 0, Channel 2
4353 5.0f, 6.0f,
4354
4355 // Batch 1, Channel 0
4356 19.0f, 20.0f,
4357
4358 // Batch 1, Channel 1
4359 21.0f, 22.0f,
4360
4361 // Batch 1, Channel 2
4362 23.0f, 24.0f,
4363
4364 // Batch 2, Channel 0
4365 7.0f, 8.0f,
4366
4367 // Batch 2, Channel 1
4368 9.0f, 10.0f,
4369
4370 // Batch 2, Channel 2
4371 11.0f, 12.0f,
4372
4373 // Batch 3, Channel 0
4374 25.0f, 26.0f,
4375
4376 // Batch 3, Channel 1
4377 27.0f, 28.0f,
4378
4379 // Batch 3, Channel 2
4380 29.0f, 30.0f,
4381
4382 // Batch 4, Channel 0
4383 13.0f, 14.0f,
4384
4385 // Batch 4, Channel 1
4386 15.0f, 16.0f,
4387
4388 // Batch 4, Channel 2
4389 17.0f, 18.0f,
4390
4391 // Batch 5, Channel 0
4392 31.0f, 32.0f,
4393
4394 // Batch 5, Channel 1
4395 33.0f, 34.0f,
4396
4397 // Batch 5, Channel 2
4398 35.0f, 36.0f
4399 }));
4400
4401 return result;
4402}
4403
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004404LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4405 armnn::IWorkloadFactory& workloadFactory,
4406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004407{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004408 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4409 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004410}
4411
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004412template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004413LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
4414 armnn::IWorkloadFactory& workloadFactory,
4415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4416 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004417 int32_t qOffset)
4418{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004419 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004420 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4421 // Batch 0, Channel 0
4422 1.0f, 2.0f,
4423
4424 // Batch 0, Channel 1
4425 3.0f, 4.0f,
4426
4427 // Batch 0, Channel 2
4428 5.0f, 6.0f,
4429
4430 // Batch 1, Channel 0
4431 19.0f, 20.0f,
4432
4433 // Batch 1, Channel 1
4434 21.0f, 22.0f,
4435
4436 // Batch 1, Channel 2
4437 23.0f, 24.0f
4438 }));
4439
Jim Flynncbb66aa2019-05-15 13:03:54 +01004440 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004441 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4442 // Batch 0, Channel 0
4443 7.0f, 8.0f,
4444
4445 // Batch 0, Channel 1
4446 9.0f, 10.0f,
4447
4448 // Batch 0, Channel 2
4449 11.0f, 12.0f,
4450
4451 // Batch 0, Channel 3
4452 25.0f, 26.0f,
4453
4454 // Batch 1, Channel 0
4455 27.0f, 28.0f,
4456
4457 // Batch 1, Channel 1
4458 29.0f, 30.0f,
4459
4460 // Batch 1, Channel 2
4461 13.0f, 14.0f,
4462
4463 // Batch 1, Channel 3
4464 15.0f, 16.0f,
4465 }));
4466
Jim Flynncbb66aa2019-05-15 13:03:54 +01004467 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004468 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4469 // Batch 0, Channel 0
4470 17.0f, 18.0f,
4471
4472 // Batch 1, Channel 0
4473 31.0f, 32.0f,
4474 }));
4475
Jim Flynncbb66aa2019-05-15 13:03:54 +01004476 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004477 LayerTestResult<T, 3> result(outputTensorInfo);
4478
4479 std::vector<T> output;
4480 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004481 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004482 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4483 { input0.data(), input1.data(), input2.data() },
4484 outputTensorInfo,
4485 output.data(),
4486 1,
4487 true);
telsoa014fcda012018-03-09 14:13:49 +00004488
4489 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4490 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4491 // Batch 0, Channel 0
4492 1.0f, 2.0f,
4493
4494 // Batch 0, Channel 1
4495 3.0f, 4.0f,
4496
4497 // Batch 0, Channel 2
4498 5.0f, 6.0f,
4499
4500 // Batch 0, Channel 3
4501 7.0f, 8.0f,
4502
4503 // Batch 0, Channel 4
4504 9.0f, 10.0f,
4505
4506 // Batch 0, Channel 5
4507 11.0f, 12.0f,
4508
4509 // Batch 0, Channel 6
4510 25.0f, 26.0f,
4511
4512 // Batch 0, Channel 7
4513 17.0f, 18.0f,
4514
4515 // Batch 1, Channel 0
4516 19.0f, 20.0f,
4517
4518 // Batch 1, Channel 1
4519 21.0f, 22.0f,
4520
4521 // Batch 1, Channel 2
4522 23.0f, 24.0f,
4523
4524 // Batch 1, Channel 3
4525 27.0f, 28.0f,
4526
4527 // Batch 1, Channel 4
4528 29.0f, 30.0f,
4529
4530 // Batch 1, Channel 5
4531 13.0f, 14.0f,
4532
4533 // Batch 1, Channel 6
4534 15.0f, 16.0f,
4535
4536 // Batch 1, Channel 7
4537 31.0f, 32.0f,
4538 }));
4539
4540 return result;
4541}
4542
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004543LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4544 armnn::IWorkloadFactory& workloadFactory,
4545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004546{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004547 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4548 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004549}
4550
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004551template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004552LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
4553 armnn::IWorkloadFactory& workloadFactory,
4554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004555 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004556 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004557 int32_t qOffset)
4558{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004559 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004560 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4561 // Batch 0, Channel 0
4562 1.0f, 2.0f,
4563
4564 // Batch 0, Channel 1
4565 3.0f, 4.0f,
4566
4567 // Batch 0, Channel 2
4568 5.0f, 6.0f,
4569
4570 // Batch 1, Channel 0
4571 19.0f, 20.0f,
4572
4573 // Batch 1, Channel 1
4574 21.0f, 22.0f,
4575
4576 // Batch 1, Channel 2
4577 23.0f, 24.0f
4578 }));
4579
Jim Flynncbb66aa2019-05-15 13:03:54 +01004580 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004581 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4582 // Batch 0, Channel 0
4583 7.0f,
4584
4585 // Batch 0, Channel 1
4586 9.0f,
4587
4588 // Batch 0, Channel 2
4589 11.0f,
4590
4591 // Batch 1, Channel 0
4592 25.0f,
4593
4594 // Batch 1, Channel 1
4595 27.0f,
4596
4597 // Batch 1, Channel 2
4598 29.0f
4599 }));
4600
Jim Flynncbb66aa2019-05-15 13:03:54 +01004601 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004602 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4603 // Batch 0, Channel 0
4604 13.0f, 14.0f, 50.0f,
4605
4606 // Batch 0, Channel 1
4607 15.0f, 16.0f, 51.0f,
4608
4609 // Batch 0, Channel 2
4610 17.0f, 18.0f, 52.0f,
4611
4612 // Batch 1, Channel 0
4613 31.0f, 32.0f, 53.0f,
4614
4615 // Batch 1, Channel 1
4616 33.0f, 34.0f, 54.0f,
4617
4618 // Batch 1, Channel 2
4619 35.0f, 36.0f, 55.0f,
4620 }));
4621
Jim Flynncbb66aa2019-05-15 13:03:54 +01004622 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004623 LayerTestResult<T, 3> result(outputTensorInfo);
4624
4625 std::vector<T> output;
4626 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004627 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004628 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4629 { input0.data(), input1.data(), input2.data() },
4630 outputTensorInfo,
4631 output.data(),
4632 2,
4633 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004634
4635 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4636 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4637 // Batch 0, Channel 0
4638 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
4639
4640 // Batch 0, Channel 1
4641 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
4642
4643 // Batch 0, Channel 2
4644 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
4645
4646 // Batch 1, Channel 0
4647 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
4648
4649 // Batch 1, Channel 1
4650 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
4651
4652 // Batch 1, Channel 2
4653 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
4654 }));
4655
4656 return result;
4657}
4658
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004659LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4660 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004661 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4662 bool useSubtensor)
4663{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004664 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4665 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004666}
4667
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004668template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004669LayerTestResult<T, 4> Concatenation4dTestImpl(
4670 armnn::IWorkloadFactory& workloadFactory,
4671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4672 const armnn::TensorInfo& outputTensorInfo,
4673 unsigned int dimension,
4674 bool useSubtensor,
4675 float qScale,
4676 int32_t qOffset)
4677{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004678 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004679
4680 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4681 1.0f, 2.0f,
4682 3.0f, 4.0f,
4683 5.0f, 6.0f,
4684 7.0f, 8.0f,
4685 9.0f, 10.0f,
4686 11.0f, 12.0f
4687 }));
4688
4689 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4690 11.0f, 12.0f,
4691 13.0f, 14.0f,
4692 15.0f, 16.0f,
4693 17.0f, 18.0f,
4694 19.0f, 20.0f,
4695 21.0f, 22.0f
4696 }));
4697
4698 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4699 21.0f, 22.0f,
4700 23.0f, 24.0f,
4701 25.0f, 26.0f,
4702 27.0f, 28.0f,
4703 29.0f, 30.0f,
4704 31.0f, 32.0f
4705 }));
4706
4707 LayerTestResult<T, 4> result(outputTensorInfo);
4708
4709 std::vector<T> output;
4710 output.resize(outputTensorInfo.GetNumElements());
4711
4712 Concatenate<T>(workloadFactory,
4713 memoryManager,
4714 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4715 {input0.data(), input1.data(), input2.data()},
4716 outputTensorInfo,
4717 output.data(),
4718 dimension,
4719 useSubtensor);
4720
4721 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4722 return result;
4723}
4724
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004725template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004726LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4727 armnn::IWorkloadFactory& workloadFactory,
4728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4729 float qScale,
4730 int32_t qOffset)
4731{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004732 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004733
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004734 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4735 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4736
narpra015cdda352018-11-19 15:30:27 +00004737 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4738 1.0f, 2.0f,
4739 3.0f, 4.0f,
4740 5.0f, 6.0f,
4741 7.0f, 8.0f,
4742 9.0f, 10.0f,
4743 11.0f, 12.0f,
4744
4745 11.0f, 12.0f,
4746 13.0f, 14.0f,
4747 15.0f, 16.0f,
4748 17.0f, 18.0f,
4749 19.0f, 20.0f,
4750 21.0f, 22.0f,
4751
4752 21.0f, 22.0f,
4753 23.0f, 24.0f,
4754 25.0f, 26.0f,
4755 27.0f, 28.0f,
4756 29.0f, 30.0f,
4757 31.0f, 32.0f
4758 }));
4759 return result;
4760}
4761
4762LayerTestResult<float, 4> Concatenation4dDim0Test(
4763 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004765{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004766 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004767}
4768
// Concatenates three [1,3,2,2] tensors along dimension 1 (channels) into
// [1,9,2,2]; channel blocks of the inputs appear consecutively.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);

    // Run the shared helper along dimension 1 (sub-tensor path enabled).
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    // With a single batch, channel-wise concatenation is also a plain back-to-back copy.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4806
4807LayerTestResult<float, 4> Concatenation4dDim1Test(
4808 armnn::IWorkloadFactory& workloadFactory,
4809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4810{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004811 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004812}
4813
// Concatenates three [1,3,2,2] tensors along dimension 2 (height) into
// [1,3,6,2]; per channel, the rows of the three inputs are interleaved
// as input0-rows, input1-rows, input2-rows.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);

    // Run the shared helper along dimension 2 (sub-tensor path enabled).
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);

    // Each channel holds the 2 rows of input0, then input1, then input2.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4851
4852LayerTestResult<float, 4> Concatenation4dDim2Test(
4853 armnn::IWorkloadFactory& workloadFactory,
4854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4855{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004856 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004857}
4858
// Concatenates three [1,3,2,2] tensors along dimension 3 (width) into
// [1,3,2,6]; each output row is input0-row, input1-row, input2-row side by side.
// useSubtensor toggles the sub-tensor path in the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);

    // Run the shared helper along dimension 3.
    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);

    // Rows of the three inputs are interleaved element-pair by element-pair.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
4897
4898LayerTestResult<float, 4> Concatenation4dDim3Test(
4899 armnn::IWorkloadFactory& workloadFactory,
4900 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4901 bool useSubtensor)
4902{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004903 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4904 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004905}
4906
// Concatenates a [1,3,2,2] tensor and a [2,3,2,2] tensor along dimension 0
// (batch), giving [3,3,2,2]. Exercises inputs whose batch counts differ.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    // Output batch count is 1 + 2 = 3.
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's single batch followed by input1's two batches.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4986
4987LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4988 armnn::IWorkloadFactory& workloadFactory,
4989 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4990{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004991 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4992 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004993}
4994
// Concatenates a [1,3,2,2] tensor and a [1,2,2,2] tensor along dimension 1
// (channels), giving [1,5,2,2]. Exercises inputs whose channel counts differ.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channel count is 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's three channels followed by input1's two channels.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
5055
5056LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5057 armnn::IWorkloadFactory& workloadFactory,
5058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5059{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005060 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5061 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005062}
5063
// Concatenates a [1,3,2,2] tensor and a [1,3,3,2] tensor along dimension 2
// (height), giving [1,3,5,2]. Exercises inputs whose heights differ.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Per channel: input0's 2 rows followed by input1's 3 rows.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
5135
5136LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5137 armnn::IWorkloadFactory& workloadFactory,
5138 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5139{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005140 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5141 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005142}
5143
// Concatenates a [1,3,2,2] tensor and a [1,3,2,3] tensor along dimension 3
// (width), giving [1,3,2,5]. Exercises inputs whose widths differ.
// useSubtensor toggles the sub-tensor path in the Concatenate helper.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output width is 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Per row: input0's 2 elements followed by input1's 3 elements.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
5204
5205LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5206 armnn::IWorkloadFactory& workloadFactory,
5207 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5208 bool useSubtensor)
5209{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005210 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5211 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005212}
5213
// "Nop" resize: the output dimensions equal the input dimensions (2 channels of
// 4x4 -> 4x4), so the resize-bilinear workload is expected to pass the input
// through unchanged. Runs for either NCHW or NHWC as selected by dataLayout.
// NOTE(review): memoryManager is currently unused in this test.
LayerTestResult<float, 4> ResizeBilinearNopTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);

    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);

    // Reference data is written in NCHW order.
    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f,

        1.0f, 2.0f, 3.0f, 4.0f,
        2.0f, 3.0f, 4.0f, 5.0f,
        3.0f, 4.0f, 5.0f, 6.0f,
        4.0f, 5.0f, 6.0f, 7.0f
    });

    // When testing NHWC, permute the NCHW reference data into NHWC layout first.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    // Identity resize: the expected output is the input itself.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5271
// Downscales a 2-channel 2x2 input to 1x1 with resize-bilinear and checks that
// each output element is the input's top-left value (corner projection, not an
// average). Runs for either NCHW or NHWC as selected by dataLayout.
// NOTE(review): memoryManager is currently unused in this test.
LayerTestResult<float, 4> SimpleResizeBilinearTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);

    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);

    // Reference data is written in NCHW order.
    std::vector<float> inputData({
        1.0f, 255.0f,
        200.0f, 250.0f,

        250.0f, 200.0f,
        250.0f, 1.0f
    });

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
    // which we would expect if projecting the centre).

    std::vector<float> outputData({
        1.0f,

        250.0f
    });

    // When testing NHWC, permute both the input and expected output into NHWC layout.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005342LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5343 armnn::IWorkloadFactory& workloadFactory,
5344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005345 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005346{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005347 const armnn::TensorInfo inputTensorInfo =
5348 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5349
5350 const armnn::TensorInfo outputTensorInfo =
5351 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005352
James Conroy6b965822018-11-01 11:33:09 +00005353 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005354 1.0f, 2.0f, 3.0f, 4.0f,
5355 2.0f, 3.0f, 4.0f, 5.0f,
5356 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005357 4.0f, 5.0f, 6.0f, 7.0f,
5358
5359 7.0f, 6.0f, 5.0f, 4.0f,
5360 6.0f, 5.0f, 4.0f, 3.0f,
5361 5.0f, 4.0f, 3.0f, 2.0f,
5362 4.0f, 3.0f, 2.0f, 1.0f
5363 });
5364
5365 std::vector<float> outputData({
5366 1.0f, 3.0f,
5367 3.0f, 5.0f,
5368
5369 7.0f, 5.0f,
5370 5.0f, 3.0f
5371 });
5372
5373 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005374 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005375 {
5376 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005377 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005378 inputData = tmp;
5379
5380 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005381 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005382 outputData = tmp1;
5383 }
5384
5385 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005386
telsoa014fcda012018-03-09 14:13:49 +00005387 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005388 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005389
5390 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5391 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5392
5393 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005394 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005395 armnn::WorkloadInfo info;
5396 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5397 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5398
5399 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5400
5401 inputHandle->Allocate();
5402 outputHandle->Allocate();
5403 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5404
Derek Lambertif30f7d32019-04-09 10:25:02 +01005405 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005406 workload->Execute();
5407
5408 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5409 return result;
5410}
5411
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005412LayerTestResult<float, 4> ResizeBilinearMinTest(
5413 armnn::IWorkloadFactory& workloadFactory,
5414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005415 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005416{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005417 const armnn::TensorInfo inputTensorInfo =
5418 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
5419
5420 const armnn::TensorInfo outputTensorInfo =
5421 armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005422
James Conroy6b965822018-11-01 11:33:09 +00005423 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005424 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
5425 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00005426 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
5427
5428 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
5429 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
5430 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
5431 });
5432
5433 std::vector<float> outputData({
5434 1.0f, 2.6666f, 6.00f,
5435 78.5f, 179.3333f, 401.00f,
5436
5437 987.0f, 454.6670f, 203.33f,
5438 48.5f, 22.3333f, 10.00f
5439 });
5440
5441 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005442 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005443 {
5444 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005445 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005446 inputData = tmp;
5447
5448 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005449 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005450 outputData = tmp1;
5451 }
5452
5453 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005454
5455 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005456 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005457
5458 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5459 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5460
5461 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005462 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005463 armnn::WorkloadInfo info;
5464 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5465 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5466
5467 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5468
5469 inputHandle->Allocate();
5470 outputHandle->Allocate();
5471 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5472
Derek Lambertif30f7d32019-04-09 10:25:02 +01005473 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005474 workload->Execute();
5475
5476 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5477 return result;
5478}
5479
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005480LayerTestResult<float, 4> ResizeBilinearMagTest(
5481 armnn::IWorkloadFactory& workloadFactory,
5482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005483 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005484{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005485 const armnn::TensorInfo inputTensorInfo =
5486 armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
5487
5488 const armnn::TensorInfo outputTensorInfo =
5489 armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005490
James Conroy6b965822018-11-01 11:33:09 +00005491 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005492 1.0f, 2.0f,
5493 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005494 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00005495
James Conroy6b965822018-11-01 11:33:09 +00005496 233.0f, 144.0f,
5497 21.0f, 13.0f,
5498 2.0f, 1.0f
5499 });
5500
5501 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01005502 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
5503 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00005504 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
5505
5506 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
5507 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
5508 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
5509 });
5510
5511 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005512 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005513 {
5514 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005515 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005516 inputData = tmp;
5517
5518 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005519 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005520 outputData = tmp1;
5521 }
5522
5523 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5524
5525 LayerTestResult<float, 4> result(outputTensorInfo);
5526 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005527
5528 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5529 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5530
5531 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005532 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005533 armnn::WorkloadInfo info;
5534 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5535 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5536
5537 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5538
5539 inputHandle->Allocate();
5540 outputHandle->Allocate();
5541 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5542
Derek Lambertif30f7d32019-04-09 10:25:02 +01005543 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005544 workload->Execute();
5545
5546 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5547 return result;
5548}
5549
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005550LayerTestResult<float, 2> FakeQuantizationTest(
5551 armnn::IWorkloadFactory& workloadFactory,
5552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005553{
5554 constexpr unsigned int width = 2;
5555 constexpr unsigned int height = 3;
5556
5557 const armnn::TensorInfo tensorInfo({height, width },
5558 armnn::DataType::Float32);
5559 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5560 -10.0f, -5.0f,
5561 0.0f, 5.0f,
5562 10.0f, 10.0f
5563 }));
5564
5565 LayerTestResult<float, 2> ret(tensorInfo);
5566
5567 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5568
5569 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5570
5571 armnn::FakeQuantizationQueueDescriptor data;
5572 armnn::WorkloadInfo info;
5573
5574 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5575 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5576 float min = -10.f;
5577 float max = 10.f;
5578
5579 data.m_Parameters.m_Min = min;
5580 data.m_Parameters.m_Max = max;
5581
5582 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5583 armnn::FakeQuantizationQueueDescriptor refData = data;
5584 armnn::WorkloadInfo refInfo = info;
5585 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5586
5587 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5588
5589 inputHandle->Allocate();
5590 outputHandle->Allocate();
5591
5592 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5593
Derek Lambertif30f7d32019-04-09 10:25:02 +01005594 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005595 workload->Execute();
5596
5597 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5598
5599 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5600 0.0f, 63.0f,
5601 128.0f, 191.0f,
5602 255.0f, 255.0f
5603 }));
5604 return ret;
5605}
5606
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005607namespace
5608{
5609
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005610LayerTestResult<float, 4> L2NormalizationTestImpl(
5611 armnn::IWorkloadFactory& workloadFactory,
5612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5613 const armnn::TensorShape& inputOutputTensorShape,
5614 const std::vector<float>& inputValues,
5615 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005616 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005617{
5618 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5619 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5620
jimfly013aab7c32018-11-12 13:32:08 +00005621 // at this point if we require it permute the input data
5622 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5623 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005624 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005625 {
5626 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005627 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005628 inputData = tmp;
5629 }
5630
5631 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005632
5633 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005634 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005635 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005636 {
5637 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005638 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5639 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005640 expectedOutputData = tmp;
5641 }
5642 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005643
5644 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5645 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5646
5647 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005648 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005649 armnn::WorkloadInfo info;
5650
5651 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5652 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5653
5654 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5655
5656 inputHandle->Allocate();
5657 outputHandle->Allocate();
5658
5659 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5660
Derek Lambertif30f7d32019-04-09 10:25:02 +01005661 workload->PostAllocationConfigure();
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005662 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005663
5664 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5665
5666 return result;
5667}
5668
5669float CalcInvL2Norm(std::initializer_list<float> elements)
5670{
5671 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5672 [](float acc, float element) { return acc + element * element; });
5673 return 1.0f / sqrtf(reduction);
5674}
5675
5676} // anonymous namespace
5677
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005678template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005679LayerTestResult<T, 2> Pad2dTestCommon(
5680 armnn::IWorkloadFactory& workloadFactory,
5681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5682 float qScale,
5683 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005684{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005685 const armnn::TensorShape inputShape{ 3, 3 };
5686 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005687
Derek Lambertif30f7d32019-04-09 10:25:02 +01005688 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5689 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005690
Derek Lambertif30f7d32019-04-09 10:25:02 +01005691 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005692 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005693 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005694 // Height (3) x Width (3)
5695 4, 8, 6,
5696 7, 4, 4,
5697 3, 2, 4
5698 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005699
Derek Lambertif30f7d32019-04-09 10:25:02 +01005700 std::vector<T> expectedOutputValues(
5701 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005702 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005703 0, 0, 0, 0, 0, 0, 0,
5704 0, 0, 0, 0, 0, 0, 0,
5705 0, 0, 4, 8, 6, 0, 0,
5706 0, 0, 7, 4, 4, 0, 0,
5707 0, 0, 3, 2, 4, 0, 0,
5708 0, 0, 0, 0, 0, 0, 0,
5709 0, 0, 0, 0, 0, 0, 0
5710 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005711
Derek Lambertif30f7d32019-04-09 10:25:02 +01005712 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005713
Derek Lambertif30f7d32019-04-09 10:25:02 +01005714 LayerTestResult<T, 2> result(outputTensorInfo);
5715 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005716
Derek Lambertif30f7d32019-04-09 10:25:02 +01005717 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5718 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005719
Derek Lambertif30f7d32019-04-09 10:25:02 +01005720 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005721
Derek Lambertif30f7d32019-04-09 10:25:02 +01005722 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5723 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5724 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005725
Derek Lambertif30f7d32019-04-09 10:25:02 +01005726 descriptor.m_Parameters.m_PadList = PadList;
5727 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005728
Derek Lambertif30f7d32019-04-09 10:25:02 +01005729 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5730 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005731
Derek Lambertif30f7d32019-04-09 10:25:02 +01005732 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005733
Derek Lambertif30f7d32019-04-09 10:25:02 +01005734 inputHandle->Allocate();
5735 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005736
Derek Lambertif30f7d32019-04-09 10:25:02 +01005737 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005738
Derek Lambertif30f7d32019-04-09 10:25:02 +01005739 workload->PostAllocationConfigure();
5740 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005741
Derek Lambertif30f7d32019-04-09 10:25:02 +01005742 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005743
Derek Lambertif30f7d32019-04-09 10:25:02 +01005744 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005745}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005746
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005747template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005748LayerTestResult<T, 3> Pad3dTestCommon(
5749 armnn::IWorkloadFactory& workloadFactory,
5750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5751 float qScale,
5752 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005753{
5754 const armnn::TensorShape inputShape{ 2, 2, 2 };
5755 const armnn::TensorShape outputShape{ 3, 5, 6 };
5756
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005757 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5758 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005759
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005760 std::vector<T> inputValues(
5761 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005762 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005763 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005764 0, 4,
5765 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005766
5767 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005768 6, 1,
5769 5, 2
5770 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005771
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005772 std::vector<T> expectedOutputValues(
5773 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005774 {
5775
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005776 0, 0, 0, 0, 0, 0,
5777 0, 0, 0, 0, 0, 0,
5778 0, 0, 0, 4, 0, 0,
5779 0, 0, 2, 5, 0, 0,
5780 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005781
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005782 0, 0, 0, 0, 0, 0,
5783 0, 0, 0, 0, 0, 0,
5784 0, 0, 6, 1, 0, 0,
5785 0, 0, 5, 2, 0, 0,
5786 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005787
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005788 0, 0, 0, 0, 0, 0,
5789 0, 0, 0, 0, 0, 0,
5790 0, 0, 0, 0, 0, 0,
5791 0, 0, 0, 0, 0, 0,
5792 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005793
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005794 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005795
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005796 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005797
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005798 LayerTestResult<T, 3> result(outputTensorInfo);
5799 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005800
5801 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5802 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5803
5804 armnn::PadQueueDescriptor descriptor;
5805
5806 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5807 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5808 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5809 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5810
5811 descriptor.m_Parameters.m_PadList = PadList;
5812 armnn::WorkloadInfo info;
5813
5814 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5815 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5816
5817 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5818
5819 inputHandle->Allocate();
5820 outputHandle->Allocate();
5821
5822 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5823
Derek Lambertif30f7d32019-04-09 10:25:02 +01005824 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005825 workload->Execute();
5826
5827 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5828
5829 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005830}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005831
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005832template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005833LayerTestResult<T, 4> Pad4dTestCommon(
5834 armnn::IWorkloadFactory& workloadFactory,
5835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5836 float qScale,
5837 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005838{
5839 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5840 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5841
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005842 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5843 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005844
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005845 std::vector<T> inputValues(
5846 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005847 {
5848 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005849 0, 1,
5850 2, 3,
5851 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005852
5853 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005854 6, 7,
5855 8, 9,
5856 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005857
5858 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005859 12, 13,
5860 14, 15,
5861 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005862
5863 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005864 18, 19,
5865 20, 21,
5866 22, 23
5867 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005868
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005869 std::vector<T> expectedOutputValues(
5870 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005871 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005872 0, 0, 0, 0,
5873 0, 0, 0, 0,
5874 0, 0, 0, 0,
5875 0, 0, 0, 0,
5876 0, 0, 0, 0,
5877 0, 0, 0, 0,
5878 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005879
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005880 0, 0, 0, 0,
5881 0, 0, 0, 0,
5882 0, 0, 0, 0,
5883 0, 0, 0, 0,
5884 0, 0, 0, 0,
5885 0, 0, 0, 0,
5886 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005887
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005888 0, 0, 0, 0,
5889 0, 0, 0, 0,
5890 0, 0, 0, 0,
5891 0, 0, 0, 0,
5892 0, 0, 0, 0,
5893 0, 0, 0, 0,
5894 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005895
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005896 0, 0, 0, 0,
5897 0, 0, 0, 0,
5898 0, 0, 0, 0,
5899 0, 0, 0, 0,
5900 0, 0, 0, 0,
5901 0, 0, 0, 0,
5902 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005903
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005904 0, 0, 0, 0,
5905 0, 0, 0, 0,
5906 0, 0, 0, 0,
5907 0, 0, 0, 0,
5908 0, 0, 0, 0,
5909 0, 0, 0, 0,
5910 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005911
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005912 0, 0, 0, 0,
5913 0, 0, 0, 0,
5914 0, 0, 0, 0,
5915 0, 0, 0, 0,
5916 0, 0, 0, 0,
5917 0, 0, 0, 0,
5918 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005919
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005920 0, 0, 0, 0,
5921 0, 0, 0, 0,
5922 0, 0, 0, 0,
5923 0, 0, 0, 0,
5924 0, 0, 0, 0,
5925 0, 0, 0, 0,
5926 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005927
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005928 0, 0, 0, 0,
5929 0, 0, 0, 0,
5930 0, 0, 0, 0,
5931 0, 0, 1, 0,
5932 0, 2, 3, 0,
5933 0, 4, 5, 0,
5934 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005935
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005936 0, 0, 0, 0,
5937 0, 0, 0, 0,
5938 0, 0, 0, 0,
5939 0, 6, 7, 0,
5940 0, 8, 9, 0,
5941 0, 10, 11, 0,
5942 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005943
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005944 0, 0, 0, 0,
5945 0, 0, 0, 0,
5946 0, 0, 0, 0,
5947 0, 0, 0, 0,
5948 0, 0, 0, 0,
5949 0, 0, 0, 0,
5950 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005951
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005952 0, 0, 0, 0,
5953 0, 0, 0, 0,
5954 0, 0, 0, 0,
5955 0, 0, 0, 0,
5956 0, 0, 0, 0,
5957 0, 0, 0, 0,
5958 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005959
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005960 0, 0, 0, 0,
5961 0, 0, 0, 0,
5962 0, 0, 0, 0,
5963 0, 0, 0, 0,
5964 0, 0, 0, 0,
5965 0, 0, 0, 0,
5966 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005967
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005968 0, 0, 0, 0,
5969 0, 0, 0, 0,
5970 0, 0, 0, 0,
5971 0, 12, 13, 0,
5972 0, 14, 15, 0,
5973 0, 16, 17, 0,
5974 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005975
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005976 0, 0, 0, 0,
5977 0, 0, 0, 0,
5978 0, 0, 0, 0,
5979 0, 18, 19, 0,
5980 0, 20, 21, 0,
5981 0, 22, 23, 0,
5982 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005983
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005984 0, 0, 0, 0,
5985 0, 0, 0, 0,
5986 0, 0, 0, 0,
5987 0, 0, 0, 0,
5988 0, 0, 0, 0,
5989 0, 0, 0, 0,
5990 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005991
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005992 0, 0, 0, 0,
5993 0, 0, 0, 0,
5994 0, 0, 0, 0,
5995 0, 0, 0, 0,
5996 0, 0, 0, 0,
5997 0, 0, 0, 0,
5998 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005999
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006000 0, 0, 0, 0,
6001 0, 0, 0, 0,
6002 0, 0, 0, 0,
6003 0, 0, 0, 0,
6004 0, 0, 0, 0,
6005 0, 0, 0, 0,
6006 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006007
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006008 0, 0, 0, 0,
6009 0, 0, 0, 0,
6010 0, 0, 0, 0,
6011 0, 0, 0, 0,
6012 0, 0, 0, 0,
6013 0, 0, 0, 0,
6014 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006015
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006016 0, 0, 0, 0,
6017 0, 0, 0, 0,
6018 0, 0, 0, 0,
6019 0, 0, 0, 0,
6020 0, 0, 0, 0,
6021 0, 0, 0, 0,
6022 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006023
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006024 0, 0, 0, 0,
6025 0, 0, 0, 0,
6026 0, 0, 0, 0,
6027 0, 0, 0, 0,
6028 0, 0, 0, 0,
6029 0, 0, 0, 0,
6030 0, 0, 0, 0
6031 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006032
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006033 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006034
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006035 LayerTestResult<T, 4> result(outputTensorInfo);
6036 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006037
6038 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6039 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6040
6041 armnn::PadQueueDescriptor descriptor;
6042
6043 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6044 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6045 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6046 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6047 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6048
6049 descriptor.m_Parameters.m_PadList = PadList;
6050 armnn::WorkloadInfo info;
6051
6052 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6053 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6054
6055 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6056
6057 inputHandle->Allocate();
6058 outputHandle->Allocate();
6059
6060 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6061
Derek Lambertif30f7d32019-04-09 10:25:02 +01006062 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006063 workload->Execute();
6064
6065 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6066
6067 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006068}
6069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006070LayerTestResult<uint8_t, 2> PadUint82dTest(
6071 armnn::IWorkloadFactory& workloadFactory,
6072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006073{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006074 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006075}
6076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006077LayerTestResult<uint8_t, 3> PadUint83dTest(
6078 armnn::IWorkloadFactory& workloadFactory,
6079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006080{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006081 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006082}
6083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006084LayerTestResult<uint8_t, 4> PadUint84dTest(
6085 armnn::IWorkloadFactory& workloadFactory,
6086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006087{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006088 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006089}
6090
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006091LayerTestResult<float, 2> PadFloat322dTest(
6092 armnn::IWorkloadFactory& workloadFactory,
6093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006094{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006095 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006096}
6097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006098LayerTestResult<float, 3> PadFloat323dTest(
6099 armnn::IWorkloadFactory& workloadFactory,
6100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006101{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006102 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006103}
6104
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006105LayerTestResult<float, 4> PadFloat324dTest(
6106 armnn::IWorkloadFactory& workloadFactory,
6107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006108{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006109 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006110}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006111
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006112LayerTestResult<float, 4> L2Normalization1dTest(
6113 armnn::IWorkloadFactory& workloadFactory,
6114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006115 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006116{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006117 // Width: 1
6118 // Height: 1
6119 // Channels: 10
6120 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006121 unsigned int numberOfBatches = 1;
6122 unsigned int numberOfChannels = 10;
6123 unsigned int height = 1;
6124 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006125
jimfly013aab7c32018-11-12 13:32:08 +00006126
Nina Drozdd41b2592018-11-19 13:03:36 +00006127 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006128 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006129 std::vector<float> inputValues
6130 {
6131 // Batch 0, Channel 0, Height (1) x Width (1)
6132 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006133
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006134 // Batch 0, Channel 1, Height (1) x Width (1)
6135 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006136
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006137 // Batch 0, Channel 2, Height (1) x Width (1)
6138 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006139
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006140 // Batch 0, Channel 3, Height (1) x Width (1)
6141 4.0f,
6142
6143 // Batch 0, Channel 4, Height (1) x Width (1)
6144 5.0f,
6145
6146 // Batch 0, Channel 5, Height (1) x Width (1)
6147 6.0f,
6148
6149 // Batch 0, Channel 6, Height (1) x Width (1)
6150 7.0f,
6151
6152 // Batch 0, Channel 7, Height (1) x Width (1)
6153 8.0f,
6154
6155 // Batch 0, Channel 8, Height (1) x Width (1)
6156 9.0f,
6157
6158 // Batch 0, Channel 9, Height (1) x Width (1)
6159 10.0f
6160 };
telsoa014fcda012018-03-09 14:13:49 +00006161 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006162 std::vector<float> expectedOutputValues
6163 {
6164 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00006165 1.0f * approxInvL2Norm,
6166 2.0f * approxInvL2Norm,
6167 3.0f * approxInvL2Norm,
6168 4.0f * approxInvL2Norm,
6169 5.0f * approxInvL2Norm,
6170 6.0f * approxInvL2Norm,
6171 7.0f * approxInvL2Norm,
6172 8.0f * approxInvL2Norm,
6173 9.0f * approxInvL2Norm,
6174 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006175 };
telsoa014fcda012018-03-09 14:13:49 +00006176
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006177
6178 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006179 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006180}
6181
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006182LayerTestResult<float, 4> L2Normalization2dTest(
6183 armnn::IWorkloadFactory& workloadFactory,
6184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006185 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006186{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006187 // Width: 5
6188 // Height: 1
6189 // Channels: 2
6190 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006191 unsigned int numberOfBatches = 1;
6192 unsigned int numberOfChannels = 2;
6193 unsigned int height = 1;
6194 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006195
Nina Drozdd41b2592018-11-19 13:03:36 +00006196 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006197 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006198 std::vector<float> inputValues
6199 {
6200 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006201 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006202
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006203 // Batch 0, Channel 1, Height (1) x Width (5)
6204 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6205 };
6206 std::vector<float> expectedOutputValues
6207 {
6208 // Batch 0, Channel 0, Height (1) x Width (5)
6209 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6210 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6211 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6212 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006213 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
6214
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006215 // Batch 0, Channel 1, Height (1) x Width (5)
6216 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6217 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6218 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6219 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006220 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006221 };
telsoa014fcda012018-03-09 14:13:49 +00006222
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006223 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006224 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006225}
telsoa014fcda012018-03-09 14:13:49 +00006226
// L2 normalization of a 1 (batch) x 2 (channel) x 4 (height) x 3 (width) tensor:
// every element is scaled by the inverse L2 norm of the two channel values at
// the same spatial position.
LayerTestResult<float, 4> L2Normalization3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 2
    // BatchSize: 1
    unsigned int numberOfBatches = 1;
    unsigned int numberOfChannels = 2;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape orders the four dimensions according to the requested
    // layout; the value lists below are written channel-by-channel.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f, 21.0f, 150.0f,
        149.0f, 32.0f, 179.0f,
        15.0f, 227.0f, 141.0f,
        147.0f, 199.0f, 220.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f, 140.0f, 73.0f,
        211.0f, 212.0f, 89.0f,
        24.0f, 138.0f, 188.0f,
        162.0f, 12.0f, 161.0f
    };
    // Each expected value is the corresponding input multiplied by
    // 1/sqrt(ch0^2 + ch1^2) for the channel pair at that position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
        140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
        73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
        212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
        89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
        24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
        12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
telsoa014fcda012018-03-09 14:13:49 +00006291
// L2 normalization of a 2 (batch) x 3 (channel) x 4 (height) x 3 (width)
// tensor: every element is scaled by the inverse L2 norm of the three channel
// values at the same batch and spatial position.
LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape orders the four dimensions according to the requested
    // layout; the value lists below are written batch-by-batch, channel-by-channel.
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    // Each expected value is the corresponding input multiplied by
    // 1/sqrt(ch0^2 + ch1^2 + ch2^2) for the channel triple at that position.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
6436
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006437template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006438LayerTestResult<T, 4> ConstantTestImpl(
6439 armnn::IWorkloadFactory& workloadFactory,
6440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00006441 float qScale,
6442 int32_t qOffset)
6443{
6444 constexpr unsigned int inputWidth = 3;
6445 constexpr unsigned int inputHeight = 4;
6446 constexpr unsigned int inputChannels = 3;
6447 constexpr unsigned int inputBatchSize = 2;
6448
6449 constexpr unsigned int outputWidth = inputWidth;
6450 constexpr unsigned int outputHeight = inputHeight;
6451 constexpr unsigned int outputChannels = inputChannels;
6452 constexpr unsigned int outputBatchSize = inputBatchSize;
6453
Nina Drozd58ef2c62019-05-16 12:09:18 +01006454 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6455 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006456
Nina Drozd58ef2c62019-05-16 12:09:18 +01006457 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6458 ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00006459
6460 // Set quantization parameters if the requested type is a quantized type.
6461 if(armnn::IsQuantizedType<T>())
6462 {
6463 inputTensorInfo.SetQuantizationScale(qScale);
6464 inputTensorInfo.SetQuantizationOffset(qOffset);
6465 outputTensorInfo.SetQuantizationScale(qScale);
6466 outputTensorInfo.SetQuantizationOffset(qOffset);
6467 }
6468
6469 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
6470 QuantizedVector<T>(qScale, qOffset, {
6471 // Batch 0, Channel 0
6472 235.0f, 46.0f, 178.0f,
6473 100.0f, 123.0f, 19.0f,
6474 172.0f, 74.0f, 250.0f,
6475 6.0f, 195.0f, 80.0f,
6476
6477 // Batch 0, Channel 1
6478 113.0f, 95.0f, 202.0f,
6479 77.0f, 114.0f, 71.0f,
6480 122.0f, 246.0f, 166.0f,
6481 82.0f, 28.0f, 37.0f,
6482
6483 // Batch 0, Channel 2
6484 56.0f, 170.0f, 162.0f,
6485 194.0f, 89.0f, 254.0f,
6486 12.0f, 209.0f, 200.0f,
6487 1.0f, 64.0f, 54.0f,
6488
6489 // Batch 1, Channel 0
6490 67.0f, 90.0f, 49.0f,
6491 7.0f, 163.0f, 18.0f,
6492 25.0f, 117.0f, 103.0f,
6493 247.0f, 59.0f, 189.0f,
6494
6495 // Batch 1, Channel 1
6496 239.0f, 104.0f, 199.0f,
6497 17.0f, 124.0f, 153.0f,
6498 222.0f, 217.0f, 75.0f,
6499 32.0f, 126.0f, 21.0f,
6500
6501 // Batch 1, Channel 2
6502 97.0f, 145.0f, 215.0f,
6503 115.0f, 116.0f, 238.0f,
6504 226.0f, 16.0f, 132.0f,
6505 92.0f, 125.0f, 88.0f,
6506 })));
6507
6508 LayerTestResult<T, 4> result(outputTensorInfo);
6509 result.outputExpected = input;
6510
6511 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6512
6513 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
6514 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
6515
6516 armnn::ConstantQueueDescriptor descriptor;
6517 descriptor.m_LayerOutput = &constantTensor;
6518
6519 armnn::WorkloadInfo info;
6520 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6521
6522 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
6523
6524 outputHandle->Allocate();
6525
Derek Lambertif30f7d32019-04-09 10:25:02 +01006526 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00006527 workload->Execute();
6528
6529 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6530 return result;
6531}
6532
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006533LayerTestResult<float, 4> ConstantTest(
6534 armnn::IWorkloadFactory& workloadFactory,
6535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006536{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006537 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006538}
6539
Nina Drozd58ef2c62019-05-16 12:09:18 +01006540LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6541 armnn::IWorkloadFactory& workloadFactory,
6542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6543{
6544 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6545}
6546
6547LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006548 armnn::IWorkloadFactory& workloadFactory,
6549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006550{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006551 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006552}
6553
// Concatenates two QAsymm8 tensors along the outermost (channel) axis where
// the inputs carry different quantization parameters. The output shares
// input1's parameters, so the backend only needs to requantize input2.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Output is 3 channels: 2 from input1 followed by 1 from input2.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1]
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4]
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters than input1,
    // so that only the requantization of input2 is required
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // First two channels pass through unchanged (same quantization space as
    // the output); the third channel is input2 requantized from
    // inputScale2/inputOffset2 into outputScale/outputOffset.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Where the backend supports sub-tensors, the inputs are created as views
    // into the output at each window origin; otherwise as standalone handles.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6696
// Concatenates a 2-channel and a 1-channel QAsymm8 tensor along the channel
// dimension into a 3-channel output. Where the backend supports sub-tensors,
// the inputs are created as views straight into the output buffer; otherwise
// the Concat workload performs the copy. All tensors share the same
// quantization parameters, so no requantization takes place.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other layer tests but is not referenced in this function.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors (layout is [C, H, W]).
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected result: channels 0-1 come from input1, channel 2 from input2.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are available the inputs alias the relevant slice of
    // the output buffer; otherwise they get their own backing storage.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Backing memory must exist before the input data can be copied in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // Configure the workload now that memory is allocated, then run it.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6832
// QSymm16 variant of ConcatUint8Test: concatenates a 2-channel and a
// 1-channel tensor along the channel dimension into a 3-channel output,
// using sub-tensor views where the backend supports them. All tensors share
// the same quantization parameters, so no requantization takes place.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other layer tests but is not referenced in this function.
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors (layout is [C, H, W]).
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected result: channels 0-1 come from input1, channel 2 from input2.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    // View origins place input1 at channel 0 and input2 at channel 2 of the output.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are available the inputs alias the relevant slice of
    // the output buffer; otherwise they get their own backing storage.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    // Backing memory must exist before the input data can be copied in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // Configure the workload now that memory is allocated, then run it.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00006965
namespace
{
// Builds and runs a single Addition workload and returns the actual vs.
// expected output. The element type T selects the quantized data type:
// uint8_t maps to QuantisedAsymm8, any other T to QuantisedSymm16.
// Each tensor carries its own quantization scale/offset; outValues must
// already be quantized with outScale/outOffset.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test helpers but is not referenced in this function.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map the element type to the corresponding quantized DataType.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Backing memory must exist before the input data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure the workload now that memory is allocated, then run it.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7035
7036LayerTestResult<uint8_t, 4> AdditionUint8Test(
7037 armnn::IWorkloadFactory& workloadFactory,
7038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7039{
7040 const unsigned int shape0[] = { 1, 2, 2, 3 };
7041 const unsigned int shape1[] = { 1, 2, 2, 3 };
7042
7043 std::vector<uint8_t> input0(
7044 {
7045 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7046 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7047 });
7048
7049 std::vector<uint8_t> input1(
7050 {
7051 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7052 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7053 });
7054
7055 std::vector<uint8_t> output(
7056 {
7057 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7058 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7059 });
7060
7061 return AdditionQuantizeTestHelper(workloadFactory,
7062 memoryManager,
7063 shape0, input0, 7.0f, 3,
7064 shape1, input1, 7.0f, 3,
7065 shape0, output, 7.0f, 3);
7066}
7067
7068LayerTestResult<int16_t, 4> AdditionInt16Test(
7069 armnn::IWorkloadFactory& workloadFactory,
7070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7071{
7072 const unsigned int shape0[] = { 1, 2, 2, 3 };
7073 const unsigned int shape1[] = { 1, 2, 2, 3 };
7074
7075 std::vector<int16_t> input0(
7076 {
7077 63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
7078 203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
7079 });
7080
7081 std::vector<int16_t> input1(
7082 {
7083 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7084 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7085 });
7086
7087 std::vector<int16_t> output(
7088 {
7089 84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
7090 329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
7091 });
7092
7093 return AdditionQuantizeTestHelper(workloadFactory,
7094 memoryManager,
7095 shape0, input0, 7.0f, 0,
7096 shape1, input1, 7.0f, 0,
7097 shape0, output, 7.0f, 0);
7098}
7099
namespace
{
// Builds and runs a single Multiplication workload and returns the actual
// vs. expected output. ArmnnType fixes the tensor DataType; T is the
// matching element type. Each tensor carries its own quantization
// scale/offset; outValues must already be quantized with outScale/outOffset.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test helpers but is not referenced in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Backing memory must exist before the input data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure the workload now that memory is allocated, then run it.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7165
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007166LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7167 armnn::IWorkloadFactory& workloadFactory,
7168 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007169{
7170 unsigned int batchSize = 1;
7171 unsigned int channels = 2;
7172 unsigned int height = 2;
7173 unsigned int width = 3;
7174 const unsigned int shape[] = { batchSize, channels, height, width };
7175
telsoa01c577f2c2018-08-31 09:22:23 +01007176 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007177 std::vector<uint8_t> input0({
7178 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7179 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7180 });
7181
telsoa01c577f2c2018-08-31 09:22:23 +01007182 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007183 std::vector<uint8_t> input1({
7184 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7185 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7186 });
7187
telsoa01c577f2c2018-08-31 09:22:23 +01007188 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007189 std::vector<uint8_t> output(
7190 {
7191 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7192 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7193 });
7194
Sadik Armagan2999a022019-04-09 14:20:12 +01007195 // Scale/offset chosen to have output values out of range.
7196 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7197 memoryManager,
7198 shape,
7199 input0,
7200 4.0f,
7201 1,
7202 shape,
7203 input1,
7204 3.0f,
7205 -2,
7206 shape,
7207 output,
7208 1366.255f,
7209 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007210}
7211
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007212LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7213 armnn::IWorkloadFactory& workloadFactory,
7214 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007215{
7216 const unsigned int shape0[] = { 1, 2, 2, 3 };
7217 const unsigned int shape1[] = { 1, 1, 1, 1 };
7218
7219 std::vector<uint8_t> input0({
7220 1, 2, 3, 4, 5, 6,
7221 7, 8, 9, 10, 11, 12
7222 });
7223
7224 std::vector<uint8_t> input1({2});
7225
7226 std::vector<uint8_t> output({
7227 2, 4, 6, 8, 10, 12,
7228 14, 16, 18, 20, 22, 24
7229 });
7230
Sadik Armagan2999a022019-04-09 14:20:12 +01007231 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7232 memoryManager,
7233 shape0,
7234 input0,
7235 1.0f,
7236 0,
7237 shape1,
7238 input1,
7239 1.0f,
7240 0,
7241 shape0,
7242 output,
7243 1.0f,
7244 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007245}
7246
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007247LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7248 armnn::IWorkloadFactory& workloadFactory,
7249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007250{
7251 const unsigned int shape0[] = { 1, 2, 2, 3 };
7252 const unsigned int shape1[] = { 1, 1, 1, 3 };
7253
7254 std::vector<uint8_t> input0({
7255 1, 2, 3, 4, 5, 6,
7256 7, 8, 9, 10, 11, 12
7257 });
7258
7259 std::vector<uint8_t> input1({1, 2, 3});
7260
7261 std::vector<uint8_t> output({
7262 1, 4, 9, 4, 10, 18,
7263 7, 16, 27, 10, 22, 36
7264 });
7265
Sadik Armagan2999a022019-04-09 14:20:12 +01007266 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7267 memoryManager,
7268 shape0,
7269 input0,
7270 1.0f,
7271 0,
7272 shape1,
7273 input1,
7274 1.0f,
7275 0,
7276 shape0,
7277 output,
7278 1.0f,
7279 0);
7280}
7281
7282LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7283 armnn::IWorkloadFactory& workloadFactory,
7284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7285{
7286 const unsigned int shape[] = { 1, 2, 2, 3 };
7287
7288 std::vector<int16_t> input0(
7289 {
7290 6, 7, 8, 9, 10, 11,
7291 12, 13, 14, 15, 16, 17
7292 });
7293
7294 std::vector<int16_t> input1(
7295 {
7296 1, 2, 3, 4, 5, 6,
7297 7, 8, 9, 10, 11, 12
7298 });
7299
7300 std::vector<int16_t> output(
7301 {
7302 6, 14, 24, 36, 50, 66,
7303 84, 104, 126, 150, 176, 204
7304 });
7305
7306 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7307 memoryManager,
7308 shape,
7309 input0,
7310 1.0f,
7311 0,
7312 shape,
7313 input1,
7314 1.0f,
7315 0,
7316 shape,
7317 output,
7318 1.0f,
7319 0);
7320}
7321
7322LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7323 armnn::IWorkloadFactory& workloadFactory,
7324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7325{
7326 const unsigned int shape0[] = { 1, 2, 2, 3 };
7327 const unsigned int shape1[] = { 1, 1, 1, 1 };
7328
7329 std::vector<int16_t> input0(
7330 {
7331 1, 2, 3, 4, 5, 6,
7332 7, 8, 9, 10, 11, 12
7333 });
7334
7335 std::vector<int16_t> input1({2});
7336
7337 std::vector<int16_t> output(
7338 {
7339 2, 4, 6, 8, 10, 12,
7340 14, 16, 18, 20, 22, 24
7341 });
7342
7343 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7344 memoryManager,
7345 shape0,
7346 input0,
7347 1.0f,
7348 0,
7349 shape1,
7350 input1,
7351 1.0f,
7352 0,
7353 shape0,
7354 output,
7355 1.0f,
7356 0);
7357}
7358
7359LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7360 armnn::IWorkloadFactory& workloadFactory,
7361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7362{
7363 const unsigned int shape0[] = { 1, 2, 2, 3 };
7364 const unsigned int shape1[] = { 1, 1, 1, 3 };
7365
7366 std::vector<int16_t> input0(
7367 {
7368 1, 2, 3, 4, 5, 6,
7369 7, 8, 9, 10, 11, 12
7370 });
7371
7372 std::vector<int16_t> input1({1, 2, 3});
7373
7374 std::vector<int16_t> output(
7375 {
7376 1, 4, 9, 4, 10, 18,
7377 7, 16, 27, 10, 22, 36
7378 });
7379
7380 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7381 memoryManager,
7382 shape0,
7383 input0,
7384 1.0f,
7385 0,
7386 shape1,
7387 input1,
7388 1.0f,
7389 0,
7390 shape0,
7391 output,
7392 1.0f,
7393 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007394}
telsoa014fcda012018-03-09 14:13:49 +00007395
namespace
{
// Builds and runs a single Subtraction workload (values0 - values1) and
// returns the actual vs. expected output. ArmnnType fixes the tensor
// DataType (float or quantized); T is the matching element type. Each
// tensor carries its own quantization scale/offset; outValues must already
// be quantized with outScale/outOffset.
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test helpers but is not referenced in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Backing memory must exist before the input data can be copied in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // Configure the workload now that memory is allocated, then run it.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7461
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007462LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7463 armnn::IWorkloadFactory& workloadFactory,
7464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007465{
7466 const unsigned int shape0[] = { 1, 1, 2, 2 };
7467 const unsigned int shape1[] = { 1, 1, 2, 2 };
7468
7469 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7470 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7471 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7472
Sadik Armagan2999a022019-04-09 14:20:12 +01007473 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7474 memoryManager,
7475 shape0, input0, 0.5f, 2,
7476 shape1, input1, 1.0f, 0,
7477 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007478}
7479
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007480LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7481 armnn::IWorkloadFactory& workloadFactory,
7482 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007483{
7484 const unsigned int shape0[] = { 1, 1, 2, 2 };
7485 const unsigned int shape1[] = { 1, 1, 1, 1 };
7486
7487 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7488 std::vector<uint8_t> input1({ 2 });
7489 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7490
Sadik Armagan2999a022019-04-09 14:20:12 +01007491 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7492 memoryManager,
7493 shape0, input0, 0.5f, 2,
7494 shape1, input1, 1.0f, 0,
7495 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007496}
7497
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007498LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7499 armnn::IWorkloadFactory& workloadFactory,
7500 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007501{
7502 const unsigned int shape0[] = { 1, 1, 2, 2 };
7503 const unsigned int shape1[] = { 1, 1, 2, 1 };
7504
7505 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7506 std::vector<uint8_t> input1({ 2, 1 });
7507 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7508
Sadik Armagan2999a022019-04-09 14:20:12 +01007509 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7510 memoryManager,
7511 shape0, input0, 1.0f, 0,
7512 shape1, input1, 1.0f, 0,
7513 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007514}
7515
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007516LayerTestResult<float, 4> SubtractionTest(
7517 armnn::IWorkloadFactory& workloadFactory,
7518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007519{
7520 const unsigned int shape0[] = { 1, 1, 2, 2 };
7521 const unsigned int shape1[] = { 1, 1, 2, 2 };
7522
7523 std::vector<float> input0({ 1, 2, 3, 4 });
7524 std::vector<float> input1({ 1, -1, 0, 2 });
7525 std::vector<float> output({ 0, 3, 3, 2 });
7526
Sadik Armagan2999a022019-04-09 14:20:12 +01007527 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7528 memoryManager,
7529 shape0, input0, 1.0f, 0,
7530 shape1, input1, 1.0f, 0,
7531 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007532}
7533
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007534LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7535 armnn::IWorkloadFactory& workloadFactory,
7536 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007537{
7538 const unsigned int shape0[] = { 1, 1, 2, 2 };
7539 const unsigned int shape1[] = { 1, 1, 1, 1 };
7540
7541 std::vector<float> input0({ 1, 2, 3, 4 });
7542 std::vector<float> input1({ 10 });
7543 std::vector<float> output({ -9, -8, -7, -6 });
7544
Sadik Armagan2999a022019-04-09 14:20:12 +01007545 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7546 memoryManager,
7547 shape0, input0, 1.0f, 0,
7548 shape1, input1, 1.0f, 0,
7549 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007550}
7551
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007552LayerTestResult<float, 4> SubtractionBroadcastTest(
7553 armnn::IWorkloadFactory& workloadFactory,
7554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007555{
7556 const unsigned int shape0[] = { 1, 1, 2, 2 };
7557 const unsigned int shape1[] = { 1, 1, 1, 2 };
7558
7559 std::vector<float> input0({ 1, 2, 3, 4 });
7560 std::vector<float> input1({ 10, -5 });
7561 std::vector<float> output({ -9, 7, -7, 9 });
7562
Sadik Armagan2999a022019-04-09 14:20:12 +01007563 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7564 memoryManager,
7565 shape0, input0, 1.0f, 0,
7566 shape1, input1, 1.0f, 0,
7567 shape0, output, 1.0f, 0);
7568}
7569
7570LayerTestResult<int16_t, 4> SubtractionInt16Test(
7571 armnn::IWorkloadFactory& workloadFactory,
7572 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7573{
7574 const unsigned int shape0[] = { 1, 1, 2, 2 };
7575 const unsigned int shape1[] = { 1, 1, 2, 2 };
7576
7577 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7578 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7579 std::vector<int16_t> output({ 3, 3, 5, 5 });
7580
7581 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7582 memoryManager,
7583 shape0, input0, 0.5f, 0,
7584 shape1, input1, 1.0f, 0,
7585 shape0, output, 1.0f, 0);
7586}
7587
7588LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7589 armnn::IWorkloadFactory& workloadFactory,
7590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7591{
7592 const unsigned int shape0[] = { 1, 1, 2, 2 };
7593 const unsigned int shape1[] = { 1, 1, 1, 1 };
7594
7595 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7596 std::vector<int16_t> input1({ 2 });
7597 std::vector<int16_t> output({ 3, 4, 5, 6 });
7598
7599 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7600 memoryManager,
7601 shape0, input0, 0.5f, 0,
7602 shape1, input1, 1.0f, 0,
7603 shape0, output, 1.0f, 0);
7604}
7605
7606LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7607 armnn::IWorkloadFactory& workloadFactory,
7608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7609{
7610 const unsigned int shape0[] = { 1, 1, 2, 2 };
7611 const unsigned int shape1[] = { 1, 1, 2, 1 };
7612
7613 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7614 std::vector<int16_t> input1({ 2, 1 });
7615 std::vector<int16_t> output({ 8, 11, 12, 15 });
7616
7617 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7618 memoryManager,
7619 shape0, input0, 1.0f, 0,
7620 shape1, input1, 1.0f, 0,
7621 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007622}
7623
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007624LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
7625 armnn::IWorkloadFactory& workloadFactory,
7626 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007627{
7628 constexpr unsigned int inputWidth = 4;
7629 constexpr unsigned int inputHeight = 4;
7630 constexpr unsigned int inputChannels = 1;
7631 constexpr unsigned int inputBatchSize = 1;
7632
7633 constexpr unsigned int outputWidth = inputWidth;
7634 constexpr unsigned int outputHeight = inputHeight;
7635 constexpr unsigned int outputChannels = inputChannels;
7636 constexpr unsigned int outputBatchSize = inputBatchSize;
7637
7638 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7639 armnn::DataType::QuantisedAsymm8);
7640 inputTensorInfo.SetQuantizationScale(1.5f);
7641 inputTensorInfo.SetQuantizationOffset(-3);
7642
7643 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7644 armnn::DataType::QuantisedAsymm8);
7645 outputTensorInfo.SetQuantizationScale(1.5f);
7646 outputTensorInfo.SetQuantizationOffset(-3);
7647
7648 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7649 1, 2, 3, 4,
7650 2, 3, 4, 5,
7651 3, 4, 5, 6,
7652 4, 5, 6, 7
7653 }));
7654
7655 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7656 result.outputExpected = input;
7657
7658 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7659 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7660
7661 armnn::ResizeBilinearQueueDescriptor descriptor;
7662 armnn::WorkloadInfo info;
7663 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7664 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7665
7666 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7667
7668 inputHandle->Allocate();
7669 outputHandle->Allocate();
7670 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7671
Derek Lambertif30f7d32019-04-09 10:25:02 +01007672 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007673 workload->Execute();
7674
7675 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7676 return result;
7677}
7678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007679LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
7680 armnn::IWorkloadFactory& workloadFactory,
7681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007682{
7683 constexpr unsigned int inputWidth = 2;
7684 constexpr unsigned int inputHeight = 2;
7685 constexpr unsigned int inputChannels = 1;
7686 constexpr unsigned int inputBatchSize = 1;
7687
7688 constexpr unsigned int outputWidth = inputWidth / 2;
7689 constexpr unsigned int outputHeight = inputHeight / 2;
7690 constexpr unsigned int outputChannels = inputChannels;
7691 constexpr unsigned int outputBatchSize = inputBatchSize;
7692
7693 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7694 armnn::DataType::QuantisedAsymm8);
7695 inputTensorInfo.SetQuantizationScale(0.1567f);
7696 inputTensorInfo.SetQuantizationOffset(1);
7697
7698 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7699 armnn::DataType::QuantisedAsymm8);
7700 outputTensorInfo.SetQuantizationScale(0.1567f);
7701 outputTensorInfo.SetQuantizationOffset(1);
7702
7703 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7704 1, 255,
7705 200, 250
7706 }));
7707
7708 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
7709 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01007710 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00007711 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
7712 // the centre).
7713 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7714 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7715 1
7716 }));
7717
7718 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7719 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7720
7721 armnn::ResizeBilinearQueueDescriptor descriptor;
7722 armnn::WorkloadInfo info;
7723 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7724 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7725
7726 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7727
7728 inputHandle->Allocate();
7729 outputHandle->Allocate();
7730 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7731
Derek Lambertif30f7d32019-04-09 10:25:02 +01007732 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007733 workload->Execute();
7734
7735 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7736 return result;
7737}
7738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007739LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
7740 armnn::IWorkloadFactory& workloadFactory,
7741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007742{
7743 constexpr unsigned int inputWidth = 4;
7744 constexpr unsigned int inputHeight = 4;
7745 constexpr unsigned int inputChannels = 1;
7746 constexpr unsigned int inputBatchSize = 1;
7747
7748 constexpr unsigned int outputWidth = inputWidth / 2;
7749 constexpr unsigned int outputHeight = inputHeight / 2;
7750 constexpr unsigned int outputChannels = inputChannels;
7751 constexpr unsigned int outputBatchSize = inputBatchSize;
7752
7753 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7754 armnn::DataType::QuantisedAsymm8);
7755 inputTensorInfo.SetQuantizationScale(3.141592f);
7756 inputTensorInfo.SetQuantizationOffset(3);
7757
7758 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7759 armnn::DataType::QuantisedAsymm8);
7760 outputTensorInfo.SetQuantizationScale(3.141592f);
7761 outputTensorInfo.SetQuantizationOffset(3);
7762
7763 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7764 1, 2, 3, 4,
7765 2, 3, 4, 5,
7766 3, 4, 5, 6,
7767 4, 5, 6, 7
7768 }));
7769
7770 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7771 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7772 1, 3,
7773 3, 5
7774 }));
7775
7776 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7777 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7778
7779 armnn::ResizeBilinearQueueDescriptor descriptor;
7780 armnn::WorkloadInfo info;
7781 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7782 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7783
7784 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7785
7786 inputHandle->Allocate();
7787 outputHandle->Allocate();
7788 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7789
Derek Lambertif30f7d32019-04-09 10:25:02 +01007790 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007791 workload->Execute();
7792
7793 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7794 return result;
7795}
7796
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007797LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
7798 armnn::IWorkloadFactory& workloadFactory,
7799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007800{
7801 constexpr unsigned int inputWidth = 3;
7802 constexpr unsigned int inputHeight = 2;
7803 constexpr unsigned int inputChannels = 1;
7804 constexpr unsigned int inputBatchSize = 1;
7805
7806 constexpr unsigned int outputWidth = 2;
7807 constexpr unsigned int outputHeight = 1;
7808 constexpr unsigned int outputChannels = inputChannels;
7809 constexpr unsigned int outputBatchSize = inputBatchSize;
7810
7811 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7812 armnn::DataType::QuantisedAsymm8);
7813 inputTensorInfo.SetQuantizationScale(1.5f);
7814 inputTensorInfo.SetQuantizationOffset(-1);
7815
7816 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7817 armnn::DataType::QuantisedAsymm8);
7818 outputTensorInfo.SetQuantizationScale(1.5f);
7819 outputTensorInfo.SetQuantizationOffset(-1);
7820
7821 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7822 1, 2, 3, // 3.0, 4.5, 6.0
7823 5, 8, 13 // 9.0, 13.5, 21.0
7824 }));
7825
7826 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7827 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7828 1, 3 // 3.0, 5.25
7829 }));
7830
7831 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7832 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7833
7834 armnn::ResizeBilinearQueueDescriptor descriptor;
7835 armnn::WorkloadInfo info;
7836 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7837 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7838
7839 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7840
7841 inputHandle->Allocate();
7842 outputHandle->Allocate();
7843
7844 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7845
Derek Lambertif30f7d32019-04-09 10:25:02 +01007846 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007847 workload->Execute();
7848
7849 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7850 return result;
7851}
7852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007853LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
7854 armnn::IWorkloadFactory& workloadFactory,
7855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007856{
7857 constexpr unsigned int inputWidth = 2;
7858 constexpr unsigned int inputHeight = 3;
7859 constexpr unsigned int inputChannels = 1;
7860 constexpr unsigned int inputBatchSize = 1;
7861
7862 constexpr unsigned int outputWidth = 5;
7863 constexpr unsigned int outputHeight = 3;
7864 constexpr unsigned int outputChannels = inputChannels;
7865 constexpr unsigned int outputBatchSize = inputBatchSize;
7866
7867 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
7868 armnn::DataType::QuantisedAsymm8);
7869 inputTensorInfo.SetQuantizationScale(0.010765f);
7870 inputTensorInfo.SetQuantizationOffset(7);
7871
7872 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
7873 armnn::DataType::QuantisedAsymm8);
7874 outputTensorInfo.SetQuantizationScale(0.010132f);
7875 outputTensorInfo.SetQuantizationOffset(-18);
7876
7877 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
7878 24, 228, // 0.183005, 2.379065,
7879 105, 128, // 1.05497, 1.302565
7880 230, 71 // 2.400595, 0.68896
7881 }));
7882
7883 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
7884 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
7885 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
7886 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
7887 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
7888 }));
7889
7890 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7891 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7892
7893 armnn::ResizeBilinearQueueDescriptor descriptor;
7894 armnn::WorkloadInfo info;
7895 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7896 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7897
7898 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
7899
7900 inputHandle->Allocate();
7901 outputHandle->Allocate();
7902 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
7903
Derek Lambertif30f7d32019-04-09 10:25:02 +01007904 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00007905 workload->Execute();
7906
7907 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7908 return result;
7909}
7910
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007911LayerTestResult<float, 2> Rsqrt2dTestCommon(
7912 armnn::IWorkloadFactory& workloadFactory,
7913 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7914 const armnn::TensorInfo inputTensorInfo,
7915 const armnn::TensorInfo outputTensorInfo,
7916 std::vector<float> inputValues,
7917 std::vector<float> expectedOutputValues)
7918{
7919 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
7920
7921 LayerTestResult<float, 2> result(outputTensorInfo);
7922 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
7923
7924 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7925 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7926
7927 armnn::RsqrtQueueDescriptor descriptor;
7928
7929 armnn::WorkloadInfo info;
7930
7931 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
7932 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
7933
7934 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
7935
7936 inputHandle->Allocate();
7937 outputHandle->Allocate();
7938
7939 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
7940
Derek Lambertif30f7d32019-04-09 10:25:02 +01007941 workload->PostAllocationConfigure();
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00007942 workload->Execute();
7943
7944 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
7945
7946 return result;
7947}
7948LayerTestResult<float, 2> Rsqrt2dTest(
7949 armnn::IWorkloadFactory& workloadFactory,
7950 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7951{
7952 const armnn::TensorShape inputShape{ 2, 2 };
7953 const armnn::TensorShape outputShape{ 2, 2 };
7954
7955 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7956 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7957
7958 std::vector<float> inputValues
7959 {
7960 1.f, 4.f,
7961 16.f, 25.f
7962 };
7963
7964 std::vector<float> expectedOutputValues
7965 {
7966 1.f, 0.5f,
7967 0.25f, 0.2f
7968 };
7969
7970 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7971 inputTensorInfo, outputTensorInfo,
7972 inputValues, expectedOutputValues);
7973}
7974
7975LayerTestResult<float, 3> Rsqrt3dTest(
7976 armnn::IWorkloadFactory& workloadFactory,
7977 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7978{
7979 const armnn::TensorShape inputShape{ 3, 1, 2 };
7980 const armnn::TensorShape outputShape{ 3, 1, 2 };
7981
7982 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7983 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7984
7985 std::vector<float> inputValues
7986 {
7987 1.f, 4.f, 16.f,
7988 25.f, 64.f, 100.f
7989 };
7990
7991 std::vector<float> expectedOutputValues
7992 {
7993 1.f, 0.5f, 0.25f,
7994 0.2f, 0.125f, 0.1f
7995 };
7996
7997 auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
7998
7999 LayerTestResult<float, 3> result(outputTensorInfo);
8000 result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));
8001
8002 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
8003 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
8004
8005 armnn::RsqrtQueueDescriptor descriptor;
8006
8007 armnn::WorkloadInfo info;
8008
8009 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
8010 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
8011
8012 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
8013
8014 inputHandle->Allocate();
8015 outputHandle->Allocate();
8016
8017 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
8018
Derek Lambertif30f7d32019-04-09 10:25:02 +01008019 workload->PostAllocationConfigure();
Mohamed Nour Abouelseouda1d3c6a2018-12-27 12:39:16 +00008020 workload->Execute();
8021
8022 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
8023
8024 return result;
8025}
8026
8027LayerTestResult<float, 2> RsqrtZeroTest(
8028 armnn::IWorkloadFactory& workloadFactory,
8029 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8030{
8031 const armnn::TensorShape inputShape{ 1, 2 };
8032 const armnn::TensorShape outputShape{ 1, 2 };
8033
8034 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
8035 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
8036
8037 std::vector<float> inputValues
8038 {
8039 0.f, -0.f
8040 };
8041
8042 std::vector<float> expectedOutputValues
8043 {
8044 INFINITY, -INFINITY
8045 };
8046
8047 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
8048 inputTensorInfo, outputTensorInfo,
8049 inputValues, expectedOutputValues);
8050}
8051
8052LayerTestResult<float, 2> RsqrtNegativeTest(
8053 armnn::IWorkloadFactory& workloadFactory,
8054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8055{
8056 const armnn::TensorShape inputShape{ 1, 2 };
8057 const armnn::TensorShape outputShape{ 1, 2 };
8058
8059 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
8060 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
8061
8062 std::vector<float> inputValues
8063 {
8064 -25.f, -16.f
8065 };
8066
8067 std::vector<float> expectedOutputValues
8068 {
8069 -NAN, -NAN
8070 };
8071
8072 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
8073 inputTensorInfo, outputTensorInfo,
8074 inputValues, expectedOutputValues);
8075}
8076
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008077LayerTestResult<float, 4> BatchNormTest(
8078 armnn::IWorkloadFactory& workloadFactory,
8079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008080{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008081 // BatchSize: 1
8082 // Channels: 2
8083 // Height: 3
8084 // Width: 2
8085
8086 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8087 std::vector<float> inputValues
8088 {
8089 // Batch 0, Channel 0, Height (3) x Width (2)
8090 1.f, 4.f,
8091 4.f, 2.f,
8092 1.f, 6.f,
8093
8094 // Batch 0, Channel 1, Height (3) x Width (2)
8095 1.f, 1.f,
8096 4.f, 1.f,
8097 -2.f, 4.f
8098 };
8099 std::vector<float> expectedOutputValues
8100 {
8101 // Batch 0, Channel 0, Height (3) x Width (2)
8102 1.f, 4.f,
8103 4.f, 2.f,
8104 1.f, 6.f,
8105
8106 // Batch 0, Channel 1, Height (3) x Width (2)
8107 3.f, 3.f,
8108 4.f, 3.f,
8109 2.f, 4.f
8110 };
8111
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008112 return BatchNormTestImpl<armnn::DataType::Float32>(
8113 workloadFactory, memoryManager,
8114 inputOutputShape, inputValues, expectedOutputValues,
8115 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008116}
8117
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008118LayerTestResult<float, 4> BatchNormNhwcTest(
8119 armnn::IWorkloadFactory& workloadFactory,
8120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008121{
8122 // BatchSize: 1
8123 // Height: 3
8124 // Width: 2
8125 // Channels: 2
8126
8127 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8128 std::vector<float> inputValues
8129 {
8130 // Batch 0, Height 0, Width (2) x Channel (2)
8131 1.f, 1.f,
8132 4.f, 1.f,
8133
8134 // Batch 0, Height 1, Width (2) x Channel (2)
8135 4.f, 4.f,
8136 2.f, 1.f,
8137
8138 // Batch 0, Height 2, Width (2) x Channel (2)
8139 1.f, -2.f,
8140 6.f, 4.f
8141 };
8142 std::vector<float> expectedOutputValues
8143 {
8144 // Batch 0, Height 0, Width (2) x Channel (2)
8145 1.f, 3.f,
8146 4.f, 3.f,
8147
8148 // Batch 0, Height 1, Width (2) x Channel (2)
8149 4.f, 4.f,
8150 2.f, 3.f,
8151
8152 // Batch 0, Height 2, Width (2) x Channel (2)
8153 1.f, 2.f,
8154 6.f, 4.f
8155 };
8156
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008157 return BatchNormTestImpl<armnn::DataType::Float32>(
8158 workloadFactory, memoryManager,
8159 inputOutputShape, inputValues, expectedOutputValues,
8160 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008161}
8162
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008163LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8164 armnn::IWorkloadFactory& workloadFactory,
8165 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008166{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008167 // BatchSize: 1
8168 // Channels: 2
8169 // Height: 3
8170 // Width: 2
8171
8172 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8173 std::vector<float> inputValues
8174 {
8175 // Batch 0, Channel 0, Height (3) x Width (2)
8176 1.f, 4.f,
8177 4.f, 2.f,
8178 1.f, 6.f,
8179
8180 // Batch 0, Channel 1, Height (3) x Width (2)
8181 1.f, 1.f,
8182 4.f, 1.f,
8183 -2.f, 4.f
8184 };
8185 std::vector<float> expectedOutputValues
8186 {
8187 // Batch 0, Channel 0, Height (3) x Width (2)
8188 1.f, 4.f,
8189 4.f, 2.f,
8190 1.f, 6.f,
8191
8192 // Batch 0, Channel 1, Height (3) x Width (2)
8193 3.f, 3.f,
8194 4.f, 3.f,
8195 2.f, 4.f
8196 };
8197
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008198 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8199 workloadFactory, memoryManager,
8200 inputOutputShape, inputValues, expectedOutputValues,
8201 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008202}
8203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008204LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8205 armnn::IWorkloadFactory& workloadFactory,
8206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008207{
8208 // BatchSize: 1
8209 // Height: 3
8210 // Width: 2
8211 // Channels: 2
8212
8213 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8214 std::vector<float> inputValues
8215 {
8216 // Batch 0, Height 0, Width (2) x Channel (2)
8217 1.f, 1.f,
8218 4.f, 1.f,
8219
8220 // Batch 0, Height 1, Width (2) x Channel (2)
8221 4.f, 4.f,
8222 2.f, 1.f,
8223
8224 // Batch 0, Height 2, Width (2) x Channel (2)
8225 1.f, -2.f,
8226 6.f, 4.f
8227 };
8228 std::vector<float> expectedOutputValues
8229 {
8230 // Batch 0, Height 0, Width (2) x Channel (2)
8231 1.f, 3.f,
8232 4.f, 3.f,
8233
8234 // Batch 0, Height 1, Width (2) x Channel (2)
8235 4.f, 4.f,
8236 2.f, 3.f,
8237
8238 // Batch 0, Height 2, Width (2) x Channel (2)
8239 1.f, 2.f,
8240 6.f, 4.f
8241 };
8242
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008243 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8244 (workloadFactory, memoryManager,
8245 inputOutputShape, inputValues, expectedOutputValues,
8246 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008247}
8248
Nina Drozd58ef2c62019-05-16 12:09:18 +01008249LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008250 armnn::IWorkloadFactory& workloadFactory,
8251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008252{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008253 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008254}
8255
Nina Drozd58ef2c62019-05-16 12:09:18 +01008256LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8257 armnn::IWorkloadFactory& workloadFactory,
8258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8259{
8260 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8261}
8262
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008263LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8264 armnn::IWorkloadFactory& workloadFactory,
8265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008266{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008267 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008268}
8269
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008270LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8271 armnn::IWorkloadFactory& workloadFactory,
8272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008273{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008274 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008275}
8276
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008277LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8278 armnn::IWorkloadFactory& workloadFactory,
8279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008280{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008281 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008282}
8283
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008284LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8285 armnn::IWorkloadFactory& workloadFactory,
8286 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008287{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008288 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8289 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008290}
8291
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008292LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8293 armnn::IWorkloadFactory& workloadFactory,
8294 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008295{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008296 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8297 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008298}
8299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008300LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8301 armnn::IWorkloadFactory& workloadFactory,
8302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008303{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008304 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008305}
8306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008307LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8308 armnn::IWorkloadFactory& workloadFactory,
8309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008310{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008311 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008312}
8313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008314LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8315 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8317 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008318{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008319 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8320 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008321}
8322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008323LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8324 armnn::IWorkloadFactory& workloadFactory,
8325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008327 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008328}
8329
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008330LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8331 armnn::IWorkloadFactory& workloadFactory,
8332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008334 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8335 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008336}
8337
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008338LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8339 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8341 bool useSubtensor)
8342{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008343 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8344 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008345}
8346
8347LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8348 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008350{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008351 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008352}
8353
8354LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8355 armnn::IWorkloadFactory& workloadFactory,
8356 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8357{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008358 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008359}
8360
8361LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8362 armnn::IWorkloadFactory& workloadFactory,
8363 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8364{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008365 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008366}
8367
8368LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8369 armnn::IWorkloadFactory& workloadFactory,
8370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8371{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008372 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8373 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008374}
8375
8376LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8377 armnn::IWorkloadFactory& workloadFactory,
8378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8379{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008380 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8381 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008382}
8383
8384LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8385 armnn::IWorkloadFactory& workloadFactory,
8386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8387{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008388 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8389 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008390}
8391
8392LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8393 armnn::IWorkloadFactory& workloadFactory,
8394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8395{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008396 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8397 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008398}
8399
8400LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8401 armnn::IWorkloadFactory& workloadFactory,
8402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8403 bool useSubtensor)
8404{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008405 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8406 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008407}
8408
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008409LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
8410 armnn::IWorkloadFactory& workloadFactory,
8411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8412 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008413{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008414 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
8415 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008416}
8417
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008418LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8419 armnn::IWorkloadFactory& workloadFactory,
8420 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8421 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008422{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008423 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008424 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008425}
8426
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008427LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
8428 armnn::IWorkloadFactory& workloadFactory,
8429 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8430 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008431{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008432 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
8433 workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00008434}
8435
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008436LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8437 armnn::IWorkloadFactory& workloadFactory,
8438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8439 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008440{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008441 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008442 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008443}
8444
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008445LayerTestResult<float, 4> SimpleMaxPooling2dTest(
8446 armnn::IWorkloadFactory& workloadFactory,
8447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008448 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008449{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008450 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008451}
8452
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008453LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
8454 armnn::IWorkloadFactory& workloadFactory,
8455 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008456 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01008457{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008458 return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01008459}
8460
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008461LayerTestResult<float, 4> SimpleAveragePooling2dTest(
8462 armnn::IWorkloadFactory& workloadFactory,
8463 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008464 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008465{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008466 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01008467}
8468
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008469LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8470 armnn::IWorkloadFactory& workloadFactory,
8471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008472 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008473{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008474 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008475 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008476}
8477
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008478LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
8479 armnn::IWorkloadFactory& workloadFactory,
8480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8481 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01008482{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008483 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008484 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01008485}
8486
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008487LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
8488 armnn::IWorkloadFactory& workloadFactory,
8489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008490{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008491 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008492}
8493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008494LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8495 armnn::IWorkloadFactory& workloadFactory,
8496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008497{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008498 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8499 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008500}
8501
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008502LayerTestResult<float, 4> SimpleL2Pooling2dTest(
8503 armnn::IWorkloadFactory& workloadFactory,
8504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008505 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008506{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008507 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008508}
8509
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008510LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
8511 armnn::IWorkloadFactory& workloadFactory,
8512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008513 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00008514{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008515 return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00008516}
8517
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008518LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
8519 armnn::IWorkloadFactory& workloadFactory,
8520 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008521{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008522 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008523}
8524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008525LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
8526 armnn::IWorkloadFactory& workloadFactory,
8527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008528{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008529 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008530}
8531
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008532LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
8533 armnn::IWorkloadFactory& workloadFactory,
8534 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008535{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008536 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008537}
8538
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008539LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
8540 armnn::IWorkloadFactory& workloadFactory,
8541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008542{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008543 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008544}
8545
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008546LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
8547 armnn::IWorkloadFactory& workloadFactory,
8548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008549{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008550 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008551}
8552
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008553LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
8554 armnn::IWorkloadFactory& workloadFactory,
8555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008556{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008557 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008558}
8559
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008560LayerTestResult<float, 4> L2Pooling2dSize7Test(
8561 armnn::IWorkloadFactory& workloadFactory,
8562 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008563{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008564 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008565}
8566
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008567LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
8568 armnn::IWorkloadFactory& workloadFactory,
8569 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008570{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008571 return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008572}
8573
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008574LayerTestResult<float, 4> L2Pooling2dSize9Test(
8575 armnn::IWorkloadFactory& workloadFactory,
8576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008577{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008578 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008579}
8580
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008581LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
8582 armnn::IWorkloadFactory& workloadFactory,
8583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008584{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008585 return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008586}
8587
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008588LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
8589 armnn::IWorkloadFactory& workloadFactory,
8590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008591{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008592 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008593}
8594
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008595LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
8596 armnn::IWorkloadFactory& workloadFactory,
8597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008598{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008599 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008600}
8601
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008602LayerTestResult<float, 4> ComparePooling2dTest(
8603 armnn::IWorkloadFactory& workloadFactory,
8604 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8605 armnn::IWorkloadFactory& refWorkloadFactory,
8606 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008607{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008608 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008609 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00008610}
8611
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008612LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8613 armnn::IWorkloadFactory& workloadFactory,
8614 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8615 armnn::IWorkloadFactory& refWorkloadFactory,
8616 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008617{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008618 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008619 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008620}
8621
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008622LayerTestResult<float, 2> FullyConnectedLargeTest(
8623 armnn::IWorkloadFactory& workloadFactory,
8624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8625 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00008626{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008627 return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00008628}
8629
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008630LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
8631 armnn::IWorkloadFactory& workloadFactory,
8632 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008633{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008634 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008635}
8636
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008637LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8638 armnn::IWorkloadFactory& workloadFactory,
8639 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008640{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008641 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8642 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008643}
8644
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008645LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
8646 armnn::IWorkloadFactory& workloadFactory,
8647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008648{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008649 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008650}
8651
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008652LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8653 armnn::IWorkloadFactory& workloadFactory,
8654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008655{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008656 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8657 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008658}
8659
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008660LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
8661 armnn::IWorkloadFactory& workloadFactory,
8662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008663{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008664 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008665}
8666
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008667LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
8668 armnn::IWorkloadFactory& workloadFactory,
8669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008670{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008671 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8672 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008673}
8674
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008675LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
8676 armnn::IWorkloadFactory& workloadFactory,
8677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008678{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008679 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
8680 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008681}
8682
8683LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008684 armnn::IWorkloadFactory& workloadFactory,
8685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008686{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008687 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
8688 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008689}
8690
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008691LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
8692 armnn::IWorkloadFactory& workloadFactory,
8693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008694{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008695 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008696}
8697
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008698LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
8699 armnn::IWorkloadFactory& workloadFactory,
8700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008701{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008702 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8703 workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008704}
8705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008706LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
8707 armnn::IWorkloadFactory& workloadFactory,
8708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008709{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008710 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008711}
8712
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008713LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
8714 armnn::IWorkloadFactory& workloadFactory,
8715 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008716{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008717 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008718}
8719
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008720LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
8721 armnn::IWorkloadFactory& workloadFactory,
8722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008723{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008724 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008725}
8726
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008727LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
8728 armnn::IWorkloadFactory& workloadFactory,
8729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008730{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008731 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008732}
8733
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008734LayerTestResult<float, 4> SimplePermuteFloat32Test(
8735 armnn::IWorkloadFactory& workloadFactory,
8736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008737{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008738 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008739};
8740
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008741LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8742 armnn::IWorkloadFactory& workloadFactory,
8743 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008744{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008745 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008746};
surmeh01bceff2f2018-03-29 16:29:27 +01008747
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008748LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8749 armnn::IWorkloadFactory& workloadFactory,
8750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008751{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008752 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008753};
8754
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008755LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8756 armnn::IWorkloadFactory& workloadFactory,
8757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008758{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008759 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008760};
8761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008762LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8763 armnn::IWorkloadFactory& workloadFactory,
8764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008765{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008766 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008767};
8768
namespace
{

// Generic driver for Mean-layer tests.
//
// Builds a Mean workload from the given factory, feeds it inputData shaped as
// inputShape, executes it, and returns the actual output alongside the
// caller-supplied expected output for comparison by the test framework.
//
// Template parameters:
//   T         - element type; uint8_t selects QuantisedAsymm8, anything else Float32.
//   InputDim  - rank of the input tensor.
//   OutputDim - rank of the output tensor (differs from InputDim when keepDims is false).
//
// Parameters:
//   axis          - dimensions to reduce over; empty means reduce over all (per m_Axis semantics).
//   keepDims      - whether reduced dimensions are retained with size 1.
//   scale, offset - quantization parameters applied to BOTH input and output infos.
//
// NOTE(review): memoryManager is accepted for signature uniformity with the other
// test helpers but is not used in this function body.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is inferred from the element type at compile time.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization parameters on input and output keeps the expected
    // integer outputs directly comparable.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and wire the tensor handles into the workload.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    // origin() is rank-agnostic, matching the OutputDim template parameter.
    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008828LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8829 armnn::IWorkloadFactory& workloadFactory,
8830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008831{
8832 const unsigned int inputShape[] = { 3, 2 };
8833 const unsigned int outputShape[] = { 1 };
8834
8835 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8836 std::vector<uint8_t> output({ 2 });
8837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008838 return MeanTestHelper<uint8_t, 2, 1>(
8839 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008840}
8841
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008842LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8843 armnn::IWorkloadFactory& workloadFactory,
8844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008845{
8846 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8847 const unsigned int outputShape[] = { 1, 1, 2 };
8848
8849 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8850 std::vector<uint8_t> output({ 2, 2 });
8851
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008852 return MeanTestHelper<uint8_t, 4, 3>(
8853 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008854}
8855
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008856LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8857 armnn::IWorkloadFactory& workloadFactory,
8858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008859{
8860 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8861 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8862
8863 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8864 std::vector<uint8_t> output({ 2, 2 });
8865
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008866 return MeanTestHelper<uint8_t, 4, 4>(
8867 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008868}
8869
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008870LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8871 armnn::IWorkloadFactory& workloadFactory,
8872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008873{
8874 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8875 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8876
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008877 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008878 std::vector<uint8_t> output({ 1, 3, 5 });
8879
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008880 return MeanTestHelper<uint8_t, 4, 4>(
8881 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008882}
8883
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008884LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8885 armnn::IWorkloadFactory& workloadFactory,
8886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008887{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008888 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008889 const unsigned int outputShape[] = { 2 };
8890
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008891 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8892 24 });
8893 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008894
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008895 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8896 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008897 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008898}
8899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008900LayerTestResult<float, 1> MeanFloatSimpleTest(
8901 armnn::IWorkloadFactory& workloadFactory,
8902 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008903{
8904 const unsigned int inputShape[] = { 3, 2 };
8905 const unsigned int outputShape[] = { 1 };
8906
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008907 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8908 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008909
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008910 return MeanTestHelper<float, 2, 1>(
8911 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008912}
8913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8915 armnn::IWorkloadFactory& workloadFactory,
8916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008917{
8918 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8919 const unsigned int outputShape[] = { 3, 1, 2 };
8920
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008921 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8922 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008923
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008924 return MeanTestHelper<float, 4, 3>(
8925 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008926}
8927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008928LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8929 armnn::IWorkloadFactory& workloadFactory,
8930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008931{
8932 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8933 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8934
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008935 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8936 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008937
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008938 return MeanTestHelper<float, 4, 4>(
8939 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008940}
8941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008942LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8943 armnn::IWorkloadFactory& workloadFactory,
8944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008945{
8946 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8947 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8948
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008949 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8950 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008951
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008952 return MeanTestHelper<float, 4, 4>(
8953 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008954}
8955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008956LayerTestResult<float, 1> MeanVtsFloat1Test(
8957 armnn::IWorkloadFactory& workloadFactory,
8958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008959{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008960 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008961 const unsigned int outputShape[] = { 2 };
8962
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008963 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8964 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8965 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008966
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008967 return MeanTestHelper<float, 3, 1>(
8968 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008969}
8970
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008971LayerTestResult<float, 3> MeanVtsFloat2Test(
8972 armnn::IWorkloadFactory& workloadFactory,
8973 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008974{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008975 const unsigned int inputShape[] = { 4, 3, 2 };
8976 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008977
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008978 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8979 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8980 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008981
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008982 return MeanTestHelper<float, 3, 3>(
8983 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008984}
8985
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008986LayerTestResult<float, 3> MeanVtsFloat3Test(
8987 armnn::IWorkloadFactory& workloadFactory,
8988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008989{
8990 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8991 const unsigned int outputShape[] = { 1, 2, 1 };
8992
8993 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8994 std::vector<float> output({ 1.5f, 3.5f });
8995
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008996 return MeanTestHelper<float, 4, 3>(
8997 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008998}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008999
// Chains two workloads: a 1x1 MaxPool with stride 2x2 over a 3x3 input,
// whose output handle is then fed (together with a second constant tensor)
// into an Addition workload. Verifies the composed result.
//
// NOTE(review): memoryManager appears unused in this body — presumably kept for
// signature uniformity with the other tests; confirm.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    // Scratch buffer sized to the pooling output shape.
    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                    {12, 16,
                                                                     24, 28,
                                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                13, 19,
                31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): this reads the pooling output handle BEFORE the pooling
    // workload has executed, then writes the same bytes straight back two lines
    // below — the round-trip through resultMaxPool looks like it has no effect
    // on the final result, since Execute() below overwrites the handle. Confirm
    // whether this copy pair is intentional (e.g. exercising the copy paths) or
    // leftover plumbing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute the pooling first, then the addition which consumes its output handle.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009104
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009105LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
9106 armnn::IWorkloadFactory& workloadFactory,
9107 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009108{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009109 return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009110}
9111
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009112LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
9113 armnn::IWorkloadFactory& workloadFactory,
9114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009115{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009116 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009117}
9118
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009119LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
9120 armnn::IWorkloadFactory& workloadFactory,
9121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009122{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009123 return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009124}
9125
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009126LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
9127 armnn::IWorkloadFactory& workloadFactory,
9128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009129{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009130 return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009131}
9132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009133LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
9134 armnn::IWorkloadFactory& workloadFactory,
9135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009136{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009137 return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009138}
9139
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009140LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
9141 armnn::IWorkloadFactory& workloadFactory,
9142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009143{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009144 return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009145}
9146
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009147LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
9148 armnn::IWorkloadFactory& workloadFactory,
9149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009150{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009151 return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009152}
9153
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009154LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
9155 armnn::IWorkloadFactory& workloadFactory,
9156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009157{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009158 return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009159}
9160
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009161LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
9162 armnn::IWorkloadFactory& workloadFactory,
9163 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009164{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009165 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009166}
9167
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009168LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
9169 armnn::IWorkloadFactory& workloadFactory,
9170 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009171{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009172 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009173}
9174
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009175LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
9176 armnn::IWorkloadFactory& workloadFactory,
9177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009178{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009179 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009180}
9181
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009182LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
9183 armnn::IWorkloadFactory& workloadFactory,
9184 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009186 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009187}
9188
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009189LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
9190 armnn::IWorkloadFactory& workloadFactory,
9191 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009192{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009193 return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009194}
9195
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009196LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
9197 armnn::IWorkloadFactory& workloadFactory,
9198 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009199{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009200 return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009201}
9202
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009203LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
9204 armnn::IWorkloadFactory& workloadFactory,
9205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009206{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009207 return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009208}
9209
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009210LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
9211 armnn::IWorkloadFactory& workloadFactory,
9212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009213{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00009214 return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009215}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009216
9217namespace {
9218
9219template<typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009220LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
9221 armnn::IWorkloadFactory &workloadFactory,
9222 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
9223 const armnn::DataLayout& dataLayout,
9224 const unsigned int *inputShape,
9225 const std::vector<T> &inputData,
9226 const std::vector<unsigned int> &blockShape,
9227 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
9228 const unsigned int *outputShape,
9229 const std::vector<T> &outputData,
9230 float scale = 1.0f,
9231 int32_t offset = 0)
Derek Lambertif30f7d32019-04-09 10:25:02 +01009232{
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009233 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
9234
9235 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
9236 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
9237
9238 inputTensorInfo.SetQuantizationScale(scale);
9239 inputTensorInfo.SetQuantizationOffset(offset);
9240
9241 outputTensorInfo.SetQuantizationScale(scale);
9242 outputTensorInfo.SetQuantizationOffset(offset);
9243
9244 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
9245
9246 LayerTestResult<T, OutputDim> result(outputTensorInfo);
9247 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
9248
9249 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
9250 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
9251
9252 armnn::BatchToSpaceNdQueueDescriptor data;
9253 data.m_Parameters.m_DataLayout = dataLayout;
9254 data.m_Parameters.m_BlockShape = blockShape;
9255 data.m_Parameters.m_Crops = crops;
9256 armnn::WorkloadInfo info;
9257 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
9258 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
9259
9260 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
9261
9262 inputHandle->Allocate();
9263 outputHandle->Allocate();
9264
9265 CopyDataToITensorHandle(inputHandle.get(), input.origin());
9266
Derek Lambertif30f7d32019-04-09 10:25:02 +01009267 workload->PostAllocationConfigure();
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009268 workload->Execute();
9269
9270 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
9271
9272 return result;
9273}
9274
9275} // anonymous namespace
9276
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009277LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
9278 armnn::IWorkloadFactory& workloadFactory,
9279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009280{
9281 const unsigned int inputShape[] = {4, 2, 2, 1};
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009282 const unsigned int outputShape[] = {1, 4, 4, 1};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009283
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009284 std::vector<float> input({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009285 // Batch 0, Height 0, Width (2) x Channel (1)
9286 1.0f, 3.0f,
9287 // Batch 0, Height 1, Width (2) x Channel (1)
9288 9.0f, 11.0f,
9289
9290
9291 // Batch 1, Height 0, Width (2) x Channel (1)
9292 2.0f, 4.0f,
9293 // Batch 1, Height 1, Width (2) x Channel (1)
9294 10.0f, 12.0f,
9295
9296
9297 // Batch 2, Height 0, Width (2) x Channel (1)
9298 5.0f, 7.0f,
9299 // Batch 2, Height 1, Width (2) x Channel (1)
9300 13.0f, 15.0f,
9301
9302 // Batch 3, Height 0, Width (2) x Channel (3)
9303 6.0f, 8.0f,
9304 // Batch 3, Height 1, Width (2) x Channel (1)
9305 14.0f, 16.0f
9306 });
9307
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009308 std::vector<float> expectedOutput({
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009309 1.0f, 2.0f, 3.0f, 4.0f,
9310 5.0f, 6.0f, 7.0f, 8.0f,
9311 9.0f, 10.0f, 11.0f, 12.0f,
9312 13.0f, 14.0f, 15.0f, 16.0f
9313 });
9314
9315 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009316 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009317
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009318 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9319 armnn::DataLayout::NHWC, inputShape, input, blockShape,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009320 crops, outputShape, expectedOutput);
9321}
9322
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009323LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
9324 armnn::IWorkloadFactory& workloadFactory,
9325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009326{
9327 const unsigned int inputShape[] = {4, 1, 1, 1};
9328 const unsigned int outputShape[] = {1, 2, 2, 1};
9329
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009330 std::vector<float> input({
9331 // Batch 0, Height 0, Width (2) x Channel (1)
9332 1.0f, 2.0f, 3.0f, 4.0f
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009333 });
9334
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009335 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009336
9337 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009338 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009339
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009340 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9341 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9342 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009343}
9344
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009345LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
9346 armnn::IWorkloadFactory& workloadFactory,
9347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009348{
9349 const unsigned int inputShape[] = {4, 1, 1, 3};
9350 const unsigned int outputShape[] = {1, 2, 2, 3};
9351
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009352 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009353
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009354 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009355
9356 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009357 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009359 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9360 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9361 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009362}
9363
// Exercises BatchToSpaceNd (NHWC) with a non-zero crop: width is cropped by
// {2, 0}, so the uncropped width 3 * 2 = 6 shrinks to the output width 4.
// The 0.0f entries in the input land in the first two output columns and are
// removed by the crop, leaving only the values 1..16.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 3, 1};
    const unsigned int outputShape[] = {2, 2, 4, 1};

    // Each row is one batch; the leading 0.0f is destined for a cropped column.
    std::vector<float> input({
        0.0f, 1.0f, 3.0f,
        0.0f, 9.0f, 11.0f,
        0.0f, 2.0f, 4.0f,
        0.0f, 10.0f, 12.0f,
        0.0f, 5.0f, 7.0f,
        0.0f, 13.0f, 15.0f,
        0.0f, 6.0f, 8.0f,
        0.0f, 14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape({2, 2});
    // {begin, end} crops per spatial dimension: crop 2 from the start of width.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9396
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009397LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
9398 armnn::IWorkloadFactory &workloadFactory,
9399 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009400{
9401 const unsigned int inputShape[] = {4, 3, 1, 1};
9402 const unsigned int outputShape[] = {1, 3, 2, 2};
9403
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009404 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009405
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009406 std::vector<float> expectedOutput({
9407 // Batch 0, Channel 0, Height (2) x Width (2)
9408 1.0f, 4.0f,
9409 7.0f, 10.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009410
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009411 // Batch 0, Channel 1, Height (2) x Width (2)
9412 2.0f, 5.0f,
9413 8.0f, 11.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009414
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009415 // Batch 0, Channel 2, Height (2) x Width (2)
9416 3.0f, 6.0f,
9417 9.0f, 12.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009418 });
9419
9420 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009421 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009422
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009423 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9424 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9425 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009426}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009427
// BatchToSpaceNd in NCHW layout: four single-element batches are folded into
// one batch whose 2x2 height/width comes from the block shape; no cropping.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input({
        // One value per batch (batches 0-3)
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9449
// BatchToSpaceNd in NCHW layout with three channels: four single-pixel batches
// interleave into a single batch with a 2x2 spatial extent per channel.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batches in order; each batch carries 3 channel values.
    std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});

    std::vector<float> expectedOutput({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 7.0f,
        2.0f, 8.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3.0f, 9.0f,
        4.0f, 10.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5.0f, 11.0f,
        6.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009480
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009481LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9482 armnn::IWorkloadFactory& workloadFactory,
9483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009484{
9485 const unsigned int inputShape[] = {4, 2, 2, 1};
9486 const unsigned int outputShape[] = {1, 4, 4, 1};
9487
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009488 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9489 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009490
9491 std::vector<unsigned int> blockShape({2, 2});
9492 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9493
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009494 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9495 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009496}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009497
// uint8 BatchToSpaceNd (NHWC): four single-element batches fold into one
// batch whose 2x2 spatial extent is the block shape; no cropping.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<uint8_t> input({
        // One value per batch (batches 0-3)
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9519
9520LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9521 armnn::IWorkloadFactory& workloadFactory,
9522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9523{
9524 const unsigned int inputShape[] = {4, 1, 1, 3};
9525 const unsigned int outputShape[] = {1, 2, 2, 3};
9526
9527 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9528
9529 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9530
9531 std::vector<unsigned int> blockShape({2, 2});
9532 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9533
9534 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9535 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9536 crops, outputShape, expectedOutput);
9537}
9538
9539
9540LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
9541 armnn::IWorkloadFactory &workloadFactory,
9542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9543{
9544 const unsigned int inputShape[] = {4, 3, 1, 1};
9545 const unsigned int outputShape[] = {1, 3, 2, 2};
9546
9547 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9548
9549 std::vector<uint8_t> expectedOutput({
9550 // Batch 0, Channel 0, Height (2) x Width (2)
9551 1, 4,
9552 7, 10,
9553
9554 // Batch 0, Channel 1, Height (2) x Width (2)
9555 2, 5,
9556 8, 11,
9557
9558 // Batch 0, Channel 2, Height (2) x Width (2)
9559 3, 6,
9560 9, 12,
9561 });
9562
9563 std::vector<unsigned int> blockShape({2, 2});
9564 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9565
9566 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9567 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9568 crops, outputShape, expectedOutput);
9569}
9570
// uint8 BatchToSpaceNd (NCHW): four single-element batches fold into one
// batch whose 2x2 height/width comes from the block shape; no cropping.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<uint8_t> input({
        // One value per batch (batches 0-3)
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9592
9593LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9594 armnn::IWorkloadFactory& workloadFactory,
9595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9596{
9597 const unsigned int inputShape[] = {4, 3, 1, 1};
9598 const unsigned int outputShape[] = {1, 3, 2, 2};
9599
9600 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9601
9602 std::vector<uint8_t> expectedOutput({
9603 // Batch 0, Channel 0, Height (2) x Width (2)
9604 1, 7,
9605 2, 8,
9606
9607 // Batch 0, Channel 1, Height (2) x Width (2)
9608 3, 9,
9609 4, 10,
9610
9611 // Batch 0, Channel 2, Height (2) x Width (2)
9612 5, 11,
9613 6, 12,
9614 });
9615
9616 std::vector<unsigned int> blockShape({2, 2});
9617 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9618
9619 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9620 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9621 crops, outputShape, expectedOutput);
9622}
9623
// uint8 BatchToSpaceNd (NCHW) with a non-zero crop: width is cropped by
// {2, 0}, so the uncropped width 3 * 2 = 6 shrinks to the output width 4.
// The zero entries in the input land in the first two output columns and are
// removed by the crop, leaving only the values 1..16.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {8, 1, 1, 3};
    const unsigned int outputShape[] = {2, 1, 2, 4};

    // Each leading 0 per width-3 row is destined for a cropped column.
    std::vector<uint8_t> input({
        0, 1, 3, 0, 9, 11,
        0, 2, 4, 0, 10, 12,
        0, 5, 7, 0, 13, 15,
        0, 6, 8, 0, 14, 16
    });

    std::vector<uint8_t> expectedOutput({
        1, 2, 3, 4,
        5, 6, 7, 8,
        9, 10, 11, 12,
        13, 14, 15, 16
    });

    std::vector<unsigned int> blockShape({2, 2});
    // {begin, end} crops per spatial dimension: crop 2 from the start of width.
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
9652
//
// StridedSlice, Float32 entry points. Each function below is a thin wrapper
// that instantiates the shared templated implementation for
// armnn::DataType::Float32; the tensor rank is fixed by the return type.
//

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

// Rank drops to 2 because the shrink-axis mask removes dimensions.
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9715
//
// StridedSlice, QuantisedAsymm8 (uint8) entry points. Thin wrappers that
// instantiate the shared templated implementation for the quantized type.
//

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Rank drops to 2 because the shrink-axis mask removes dimensions.
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009778
//
// Debug layer entry points for ranks 4..1, in Float32 and QuantisedAsymm8.
// Each is a thin wrapper around the shared templated DebugNDTest implementation.
//

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00009834
//
// Gather entry points. Thin wrappers instantiating the shared templated
// implementations (see GatherTestImpl.hpp) for Float32 and QuantisedAsymm8.
//

LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009877
//
// Dequantize entry points: quantized input (uint8 / int16) dequantized to
// float. Thin wrappers around the shared templated implementations.
//

LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Variant exercising a non-zero quantization offset (asymmetric uint8 only).
LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9898
//
// Quantize entry points: float input quantized to uint8 / int16. Thin
// wrappers around the shared templated implementations.
//

LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

// Variants exercising saturation at the quantized type's numeric limits.
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}