blob: 43b0d33bdd4b68879ff210c5560995e670949604 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
9#include "test/TensorHelpers.hpp"
10#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010011#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000012
13#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010014#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000015
David Beck711fa312018-09-24 10:46:38 +010016#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000017
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000018#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000019#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000020#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000021
Éanna Ó Catháinde705582018-12-03 13:04:22 +000022#include <reference/workloads/RefWorkloads.hpp>
23
telsoa014fcda012018-03-09 14:13:49 +000024#include <algorithm>
25#include <boost/cast.hpp>
26
27#include "WorkloadTestUtils.hpp"
28#include "Conv2dTestImpl.hpp"
29#include "BatchNormTestImpl.hpp"
30#include "ActivationTestImpl.hpp"
31#include "Pooling2dTestImpl.hpp"
32#include "ReshapeTestImpl.hpp"
33#include "FullyConnectedTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000034#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000035#include "SplitterTestImpl.hpp"
36#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000037#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000038#include "NormTestImpl.hpp"
39#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010040#include "LstmTestImpl.hpp"
41#include "ConvertFp16ToFp32TestImpl.hpp"
42#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000043#include "DebugTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000044
// Shared single-batch 3-channel 16x8 (WxH) input image for several Conv2d tests.
// Channel layout (NCHW):
//   channel 0: 0.5f everywhere except an all-zero second row,
//   channel 1: a vertical line of 1s in column 2, zeros elsewhere,
//   channel 2: -1 everywhere.
static std::vector<float> ConvInput3x8x16({
    // Channel 0.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
72
// 2-channel bias shared by a number of Conv2d tests (one value per output channel).
static std::vector<float> Bias2({0, 2});
75
telsoa01c577f2c2018-08-31 09:22:23 +010076// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
telsoa014fcda012018-03-09 14:13:49 +000077template<typename T>
78boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
79{
80 if(biasEnabled)
81 {
82 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
83 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
84 return bias;
85 }
86 else
87 {
88 return boost::multi_array<T, 1>();
89 }
90}
91
92template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000093LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
94 armnn::IWorkloadFactory& workloadFactory,
95 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
96 float qScale,
97 int32_t qOffset,
98 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +000099 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000100{
telsoa01c577f2c2018-08-31 09:22:23 +0100101 // Use common single-batch 3-channel 16x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000102 armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
103 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
104
telsoa01c577f2c2018-08-31 09:22:23 +0100105 // Use a 2-element batch with 3-channel 3x5 kernels.
telsoa014fcda012018-03-09 14:13:49 +0000106 armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
107 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
108 QuantizedVector<T>(qScale, qOffset, {
109 1, 1, 1,
110 1, -1, 1,
111 1, 1, 1,
112 1, 1, 1,
113 1, 1, 1,
114
115 0, 0, 0,
116 0, 0, 0,
117 0, 0, 0,
118 0, 0, 0,
119 0, 0, 0,
120
121 2, 2, 2,
122 2, 2, 2,
123 2, 2, 2,
124 2, 2, 2,
125 2, 2, 2,
126
127
128 0, 0, 0,
129 0, 0, 0,
130 0, 0, 0,
131 0, 0, 0,
132 0, 0, 0,
133
134 1, 1, 1,
135 1, 1, 1,
136 1, 1, 1,
137 1, 1, 1,
138 1, 1, 1,
139
140 0, 0, 0,
141 0, 0, 0,
142 0, 0, 0,
143 0, 0, 0,
144 0, 0, 0
145 })));
146
telsoa01c577f2c2018-08-31 09:22:23 +0100147 // Expected output is 2 batch elements of a 1-channel 14x4 image.
telsoa014fcda012018-03-09 14:13:49 +0000148 armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
149 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
150 QuantizedVector<T>(qScale, qOffset, {
151 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
152 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
153 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
154 -23.5f, -23.5f, -23.5f,
155 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
156 -23.5f, -23.5f, -23.5f,
157
158 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
159 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
160 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
161 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
162 })));
163
164 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000165 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000166 input,
167 kernel,
168 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
169 expectedOutput,
170 qScale,
jimfly010a088a62018-10-25 17:05:05 +0100171 qOffset,
172 layout);
telsoa014fcda012018-03-09 14:13:49 +0000173}
174
175template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000176LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
177 armnn::IWorkloadFactory& workloadFactory,
178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
179 float qScale,
180 int32_t qOffset,
181 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000182 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000183{
telsoa01c577f2c2018-08-31 09:22:23 +0100184 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
telsoa014fcda012018-03-09 14:13:49 +0000185
telsoa01c577f2c2018-08-31 09:22:23 +0100186 // Use common single-batch 3-channel 16x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000187 armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
188 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
189
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // Use a 2-element batch of 3-channel 3x3 kernels.
telsoa014fcda012018-03-09 14:13:49 +0000191 armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
192 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
193 QuantizedVector<T>(qScale, qOffset, {
194 1, 1, 1,
195 1, -1, 1,
196 1, 1, 1,
197
198 0, 0, 0,
199 0, 0, 0,
200 0, 0, 0,
201
202 2, 2, 2,
203 2, 2, 2,
204 2, 2, 2,
205
206
207 0, 0, 0,
208 0, 0, 0,
209 0, 0, 0,
210
211 1, 1, 1,
212 1, 1, 1,
213 1, 1, 1,
214
215 0, 0, 0,
216 0, 0, 0,
217 0, 0, 0
218 })));
219
telsoa01c577f2c2018-08-31 09:22:23 +0100220 // Expected output is 1 batch of a 2-channel 14x6 image.
telsoa014fcda012018-03-09 14:13:49 +0000221 armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
222 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
223 QuantizedVector<T>(qScale, qOffset, {
224 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
225 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
226 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
227 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
228 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
229 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
230
231 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
232 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
233 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
234 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
235 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
236 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
237 })));
238
239 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000240 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000241 input,
242 kernel,
243 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
244 expectedOutput,
245 qScale,
narpra015f703182018-10-26 16:24:58 +0100246 qOffset,
247 layout);
telsoa014fcda012018-03-09 14:13:49 +0000248}
249
Francis Murtaghd59116e2018-10-04 16:03:07 +0100250template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000251LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
252 armnn::IWorkloadFactory& workloadFactory,
253 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
254 float qScale,
255 int32_t qOffset,
256 bool biasEnabled,
257 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100258{
259 // Use common single-batch 5x5 image.
260
261 armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
262 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
263 {
264 1, 5, 2, 3,
265 8, 7, 3, 6,
266 3, 3, 9, 1
267 });
268
269
270 // Use a 2-element batch of 3-channel 3x3 kernels.
271 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
272 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
273 4, 5, 6,
274 0, 0, 0,
275 3, 2, 1
276 });
277
278 // Expected output is 1 batch of a 5x5 image.
279 armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
280
281 const std::vector<float> outputData =
282 {
283 23, 41, 33, 21,
284 44, 65, 76, 52,
285 82, 85, 79, 42
286 };
287
288 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
289
290 return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000291 memoryManager,
Francis Murtaghd59116e2018-10-04 16:03:07 +0100292 input,
293 kernel,
294 boost::multi_array<T, 1>(),
295 expectedOutput,
296 dataLayout,
297 qScale,
298 qOffset);
299}
300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000301LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
302 armnn::IWorkloadFactory& workloadFactory,
303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
304 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000305 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000306{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000307 return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000308}
309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000310LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
313 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000314 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000315{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000316 return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000317}
318
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000319LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
320 armnn::IWorkloadFactory& workloadFactory,
321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
322 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000323 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000325 return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000326}
327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000328LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
329 armnn::IWorkloadFactory& workloadFactory,
330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
331 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100332{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000333 return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
334 memoryManager,
335 0.f,
336 0,
337 biasEnabled,
338 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100339}
340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000341LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
342 armnn::IWorkloadFactory& workloadFactory,
343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
344 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000345 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000346{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000347 return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000348}
349
350template<typename T>
351LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
352 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000353 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000354 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000355 float qScale,
356 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000357{
telsoa01c577f2c2018-08-31 09:22:23 +0100358 // Use a single-batch 1-channel 3x3 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000359 armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
360 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
361 QuantizedVector<T>(qScale, qOffset, {
362 11,21,31,
363 12,22,32,
364 13,23,33
365 })));
366
telsoa01c577f2c2018-08-31 09:22:23 +0100367 // Use 1 batch of a 1-channel 2x2 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000368 armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
369 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
370 QuantizedVector<T>(qScale, qOffset, {
371 -11,-21,
372 -12,-22,
373 })));
374
telsoa01c577f2c2018-08-31 09:22:23 +0100375// Expected output is 1 batch of a 1-channel 6x8 image.
telsoa014fcda012018-03-09 14:13:49 +0000376// Manually calculated like this:
377//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
378//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
379//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
380//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
381//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
382//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
383//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
384 armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
385 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
386 QuantizedVector<T>(qScale, qOffset, {
387 0, 0, 0, 0, 0, 0,
388 -242, -594, -934, -372, 0, 0,
389 -495, -1190, -1850, -725, 0, 0,
390 -538, -1256, -1916, -748, 0, 0,
391 -273, -626, -946, -363, 0, 0,
392 0, 0, 0, 0, 0, 0,
393 0, 0, 0, 0, 0, 0,
394 0, 0, 0, 0, 0, 0
395 })));
396
397 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000398 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000399 input,
400 kernel,
401 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
402 expectedOutput,
403 qScale,
404 qOffset,
narpra015f703182018-10-26 16:24:58 +0100405 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100406 1, // Padding left.
407 2, // Padding top.
408 3, // Padding right.
409 4); // Padding bottom.
telsoa014fcda012018-03-09 14:13:49 +0000410}
411
412template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000413LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
414 armnn::IWorkloadFactory& workloadFactory,
415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000416 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000417 float qScale,
418 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000419{
telsoa01c577f2c2018-08-31 09:22:23 +0100420 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000421 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
422 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
423 QuantizedVector<T>(qScale, qOffset, {
424 11,21,31,41,51,
425 12,22,32,42,52,
426 13,23,33,43,53,
427 14,24,34,44,54,
428 15,25,35,45,55,
429 })));
430
telsoa01c577f2c2018-08-31 09:22:23 +0100431 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000432 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
433 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
434 QuantizedVector<T>(qScale, qOffset, {
435 -11,-21,-31,-41,
436 -12,-22,-32,-42,
437 -13,-23,-33,-43,
438 -14,-24,-34,-44,
439 })));
440
telsoa01c577f2c2018-08-31 09:22:23 +0100441 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000442 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
443 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
444 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
445 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000446 -7140, -10580, -13940, -9300, -5230,
447 -9590, -14120, -18520, -12290, -6860,
448 -9980, -14560, -18960, -12560, -7000,
449 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100450 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000451 })));
452
453 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000454 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000455 input,
456 kernel,
457 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
458 expectedOutput,
459 qScale,
460 qOffset,
narpra015f703182018-10-26 16:24:58 +0100461 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100462 1, // Padding left.
463 1, // Padding top.
464 2, // Padding right.
465 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100466}
467
468template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000469LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
470 armnn::IWorkloadFactory& workloadFactory,
471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
472 float qScale,
473 int32_t qOffset,
474 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000475 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100476{
telsoa01c577f2c2018-08-31 09:22:23 +0100477 // Use a single-batch 2-channel 5x5 image as input.
surmeh013537c2c2018-05-18 16:31:43 +0100478 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
479 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
480 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
481 0, 1, 2, 3, 4,
482 5, 6, 7, 8, 9,
483 10, 11, 12, 13, 14,
484 15, 16, 17, 18, 19,
485 20, 21, 22, 23, 24,
486
487 25, 26, 27, 28, 29,
488 30, 31, 32, 33, 34,
489 35, 36, 37, 38, 39,
490 40, 41, 42, 43, 44,
491 45, 46, 47, 48, 49
492 })));
493
telsoa01c577f2c2018-08-31 09:22:23 +0100494 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
surmeh013537c2c2018-05-18 16:31:43 +0100495 armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
496 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
497 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
498 32, 31, 30, 29,
499 28, 27, 26, 25,
500 24, 23, 22, 21,
501 20, 19, 18, 17,
502
503 16, 15, 14, 13,
504 12, 11, 10, 9,
505 8, 7, 6, 5,
506 4, 3, 2, 1
507 })));
508
telsoa01c577f2c2018-08-31 09:22:23 +0100509 // Expected output is 1 batch of a 2-channel 5x5 image.
510 // Calculated using the python tensorflow library with strideX=1, strideY=1.
surmeh013537c2c2018-05-18 16:31:43 +0100511 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
512 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
513 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
514 1062, 1580, 1850, 1530, 1117,
515 2140, 3108, 3500, 2842, 2042,
516 3580, 5068, 5460, 4342, 3062,
517 3618, 5072, 5390, 4248, 2971,
518 3074, 4282, 4510, 3533, 2457,
519 1550, 2284, 2362, 1955, 1428,
520 2910, 4206, 4342, 3528, 2536,
521 3390, 4886, 5022, 4068, 2916,
522 3566, 5056, 5182, 4133, 2922,
523 3100, 4352, 4452, 3517, 2465
524 })));
525
526 return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000527 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +0100528 input,
529 kernel,
530 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
531 expectedOutput,
532 qScale,
533 qOffset,
jimfly01382a91d2018-10-26 15:55:50 +0100534 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100535 1, // Padding left.
536 1, // Padding top.
537 2, // Padding right.
538 2, // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100539 1, // strideX
540 1); // strideY
telsoa014fcda012018-03-09 14:13:49 +0000541}
542
Nikhil Rajcec6b652018-10-12 13:51:57 +0100543template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000544LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
545 armnn::IWorkloadFactory& workloadFactory,
546 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
547 float qScale,
548 int32_t qOffset,
549 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100550{
551 armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
552 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
553 QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
554 0, 25,
555 1, 26,
556 2, 27,
557 3, 28,
558 4, 29,
559
560 5, 30,
561 6, 31,
562 7, 32,
563 8, 33,
564 9, 34,
565
566 10, 35,
567 11, 36,
568 12, 37,
569 13, 38,
570 14, 39,
571
572 15, 40,
573 16, 41,
574 17, 42,
575 18, 43,
576 19, 44,
577
578 20, 45,
579 21, 46,
580 22, 47,
581 23, 48,
582 24, 49
583 })));
584
585 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
586 auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
587 QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
588 32, 16,
589 31, 15,
590 30, 14,
591 29, 13,
592
593 28, 12,
594 27, 11,
595 26, 10,
596 25, 9,
597
598 24, 8,
599 23, 7,
600 22, 6,
601 21, 5,
602
603 20, 4,
604 19, 3,
605 18, 2,
606 17, 1
607 })));
608
609 armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
610 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
611 QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
612 1062, 1550,
613 1580, 2284,
614 1850, 2362,
615 1530, 1955,
616 1117, 1428,
617
618 2140, 2910,
619 3108, 4206,
620 3500, 4342,
621 2842, 3528,
622 2042, 2536,
623
624 3580, 3390,
625 5068, 4886,
626 5460, 5022,
627 4342, 4068,
628 3062, 2916,
629
630 3618, 3566,
631 5072, 5056,
632 5390, 5182,
633 4248, 4133,
634 2971, 2922,
635
636 3074, 3100,
637 4282, 4352,
638 4510, 4452,
639 3533, 3517,
640 2457, 2465
641 })));
642
643 return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000644 memoryManager,
Nikhil Rajcec6b652018-10-12 13:51:57 +0100645 input,
646 kernel,
647 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
648 expectedOutput,
649 qScale,
650 qOffset,
651 1, // Padding left.
652 1, // Padding top.
653 2, // Padding right.
654 2, // Padding bottom.
655 1, // strideX
656 1); // strideY
657}
658
telsoa014fcda012018-03-09 14:13:49 +0000659LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000660Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
661 armnn::IWorkloadFactory& workloadFactory,
662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000663 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000664{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000665 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
666 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000667}
668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000669LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
670 armnn::IWorkloadFactory& workloadFactory,
671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000672 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000673{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000674 return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
675 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000676}
677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000678LayerTestResult<float, 4> DepthwiseConvolution2dTest(
679 armnn::IWorkloadFactory& workloadFactory,
680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
681 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000682 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000683{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000684 return DepthwiseConvolution2dTestImpl<float, float>(
685 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000686}
687
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000688LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
689 armnn::IWorkloadFactory& workloadFactory,
690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
691 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100692{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000693 return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100694}
695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000696LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
697 armnn::IWorkloadFactory& workloadFactory,
698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
699 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000700 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000701{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000702 return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
703 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000704}
705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000706LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
707 armnn::IWorkloadFactory& workloadFactory,
708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
709 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000710 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100711{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000712 return DepthwiseConvolution2dAsymmetricTestCommon<float>(
713 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100714}
715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000716LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
717 armnn::IWorkloadFactory& workloadFactory,
718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
719 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000720 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000721{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000722 return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
723 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000724}
725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000726LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
727 armnn::IWorkloadFactory& workloadFactory,
728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
729 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000730 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000731{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000732 return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
733 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000734}
735
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000736LayerTestResult<float, 4> Convolution1dTest(
737 armnn::IWorkloadFactory& workloadFactory,
738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
739 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000740{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000741 return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000742}
743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000744LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
745 armnn::IWorkloadFactory& workloadFactory,
746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
747 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000748{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000749 return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000750}
751
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000752LayerTestResult<float,4> CompareConvolution2dTest(
753 armnn::IWorkloadFactory& workloadFactory,
754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
755 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000756{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000757 return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000758}
759
760template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000761LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
762 armnn::IWorkloadFactory& workloadFactory,
763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
764 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000765 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000766{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000767 return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000768}
769
// Explicit instantiations of CompareDepthwiseConvolution2dTest for the two
// data types exercised by the backend unit tests (float32 and uint8).
template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);

template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000781
782LayerTestResult<float,4> SimpleNormalizationAcrossTest(
783 armnn::IWorkloadFactory& workloadFactory,
784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000785{
786 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
787 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000788 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000789}
790
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000791LayerTestResult<float,4> SimpleNormalizationWithinTest(
792 armnn::IWorkloadFactory& workloadFactory,
793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000794{
795 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
796 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000797 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000798}
799
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000800LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
801 armnn::IWorkloadFactory& workloadFactory,
802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100803{
804 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
805 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000806 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100807}
808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000809LayerTestResult<float,2> SimpleSoftmaxTest(
810 armnn::IWorkloadFactory& workloadFactory,
811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
812 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000813{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000814 return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000815}
816
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000817LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
818 armnn::IWorkloadFactory& workloadFactory,
819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
820 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000821{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000822 return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000823}
824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000825LayerTestResult<float,4> CompareNormalizationTest(
826 armnn::IWorkloadFactory& workloadFactory,
827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
828 armnn::IWorkloadFactory& refWorkloadFactory,
829 armnn::NormalizationAlgorithmChannel normChannel,
830 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000831{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000832 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000833}
834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000835LayerTestResult<float,2> CompareSoftmaxTest(
836 armnn::IWorkloadFactory& workloadFactory,
837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000838 armnn::IWorkloadFactory& refWorkloadFactory,
839 float beta)
840{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000841 return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000842}
843
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000844LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
845 armnn::IWorkloadFactory& workloadFactory,
846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000847 armnn::IWorkloadFactory& refWorkloadFactory,
848 float beta)
849{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850 return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000851}
852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000853std::vector<LayerTestResult<float,3>> SplitterTest(
854 armnn::IWorkloadFactory& workloadFactory,
855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000856{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857 return SplitterTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000858}
859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000860std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
861 armnn::IWorkloadFactory& workloadFactory,
862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000863{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000864 return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000865}
866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867LayerTestResult<float, 3> CopyViaSplitterTest(
868 armnn::IWorkloadFactory& workloadFactory,
869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000870{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000871 return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000872}
873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000874LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
875 armnn::IWorkloadFactory& workloadFactory,
876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000877{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000878 return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000879}
880
telsoa01c577f2c2018-08-31 09:22:23 +0100881LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000882 armnn::IWorkloadFactory& workloadFactory,
883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100884{
885 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
886 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
887 { 2., 3., 3., 4. }));
888
889 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
890 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
891 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
892 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000893 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
894 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100895}
896
897LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000898 armnn::IWorkloadFactory& workloadFactory,
899 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100900{
901 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
902 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
903 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
904 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
905
906 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
907 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
908 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
909 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
910 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
911 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
912 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
913 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
914 0.02168f}));
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000915 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100916}
917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000918LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100921{
922 armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
923 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
924 {2., 3., 3., 4.}));
925
926
927 armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
928 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
929 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
930 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000932 return LstmNoCifgNoPeepholeNoProjectionTestImpl(
933 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100934}
935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000936LayerTestResult<float,3> MergerTest(
937 armnn::IWorkloadFactory& workloadFactory,
938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000939{
surmeh013537c2c2018-05-18 16:31:43 +0100940 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +0000941 unsigned int outputHeight = 6;
942 unsigned int outputChannels = 3;
943
surmeh013537c2c2018-05-18 16:31:43 +0100944 unsigned int inputWidth1 = 3;
945 unsigned int inputHeight1 = 6;
946 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +0000947
surmeh013537c2c2018-05-18 16:31:43 +0100948 unsigned int inputWidth2 = 3;
949 unsigned int inputHeight2 = 6;
950 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +0000951
telsoa01c577f2c2018-08-31 09:22:23 +0100952 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +0000953 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
954 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
955 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +0000956
957 LayerTestResult<float,3> ret(outputTensorInfo);
958
telsoa014fcda012018-03-09 14:13:49 +0000959 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +0100960 {
961 1.0f, 2.0f, 3.0f,
962 4.0f, 5.0f, 6.0f,
963 7.0f, 8.0f, 9.0f,
964 10.0f, 11.0f, 12.0f,
965 13.0f, 14.0f, 15.0f,
966 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +0000967
surmeh013537c2c2018-05-18 16:31:43 +0100968 19.0f, 20.0f, 21.0f,
969 22.0f, 23.0f, 24.0f,
970 25.0f, 26.0f, 27.0f,
971 28.0f, 29.0f, 30.0f,
972 31.0f, 32.0f, 33.0f,
973 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +0000974
surmeh013537c2c2018-05-18 16:31:43 +0100975 37.0f, 38.0f, 39.0f,
976 40.0f, 41.0f, 42.0f,
977 43.0f, 44.0f, 45.0f,
978 46.0f, 47.0f, 48.0f,
979 49.0f, 50.0f, 51.0f,
980 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +0000981 })
982 );
983
telsoa014fcda012018-03-09 14:13:49 +0000984 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
985 {
surmeh013537c2c2018-05-18 16:31:43 +0100986 1.0f, 2.0f, 3.0f,
987 4.0f, 5.0f, 6.0f,
988 7.0f, 8.0f, 9.0f,
989 10.0f, 11.0f, 12.0f,
990 13.0f, 14.0f, 15.0f,
991 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +0000992
surmeh013537c2c2018-05-18 16:31:43 +0100993 19.0f, 20.0f, 21.0f,
994 22.0f, 23.0f, 24.0f,
995 25.0f, 26.0f, 27.0f,
996 28.0f, 29.0f, 30.0f,
997 31.0f, 32.0f, 33.0f,
998 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +0000999 })
1000 );
1001
1002 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1003 {
surmeh013537c2c2018-05-18 16:31:43 +01001004 37.0f, 38.0f, 39.0f,
1005 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001006 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001007 46.0f, 47.0f, 48.0f,
1008 49.0f, 50.0f, 51.0f,
1009 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001010 })
1011 );
1012
telsoa01c577f2c2018-08-31 09:22:23 +01001013 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00001014 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
1015
telsoa01c577f2c2018-08-31 09:22:23 +01001016 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00001017 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
1018
telsoa014fcda012018-03-09 14:13:49 +00001019 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1020
1021 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1022
1023 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1024 subTensorsSupported ?
1025 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1026 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1027
1028 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1029 subTensorsSupported ?
1030 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1031 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1032
telsoa014fcda012018-03-09 14:13:49 +00001033 armnn::MergerQueueDescriptor data;
1034 armnn::WorkloadInfo info;
1035 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1036 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001037 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1038
1039 data.m_ViewOrigins.push_back(window1);
1040 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001041
1042 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
1043
1044 inputHandle1->Allocate();
1045 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001046 outputHandle->Allocate();
1047
1048 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1049 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001050
1051 workload->Execute();
1052
1053 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1054
1055 return ret;
1056}
1057
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001058LayerTestResult<float,4> AdditionTest(
1059 armnn::IWorkloadFactory& workloadFactory,
1060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001061{
1062 unsigned int batchSize = 2;
1063 unsigned int channels = 2;
1064 unsigned int height = 2;
1065 unsigned int width = 3;
1066
1067 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1068 armnn::TensorInfo outputTensorInfo;
1069
1070 unsigned int shape[] = {batchSize, channels, height, width};
1071
1072 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1073 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1074 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1075
1076
1077 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1078 {
1079 0.0f, 2.0f, 1.0f,
1080 0.2f, 1.0f, 2.0f,
1081
1082 1.0f, 2.0f, 1.0f,
1083 0.2f, 1.0f, 2.0f,
1084
1085 0.0f, 2.0f, 1.0f,
1086 4.2f, 1.0f, 2.0f,
1087
1088 0.0f, 0.0f, 1.0f,
1089 0.2f, 1.0f, 2.0f,
1090 }));
1091
1092 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1093 {
1094 1.0f, 2.0f, 1.0f,
1095 0.0f, 1.0f, 2.0f,
1096
1097 1.0f, 2.0f, -2.0f,
1098 0.2f, 1.0f, 2.0f,
1099
1100 0.0f, 2.0f, 1.0f,
1101 4.2f, 0.0f, -3.0f,
1102
1103 0.0f, 0.0f, 1.0f,
1104 0.7f, 1.0f, 5.0f,
1105 }));
1106
1107 LayerTestResult<float,4> ret(outputTensorInfo);
1108 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1109 {
1110 1.0f, 4.0f, 2.0f,
1111 0.2f, 2.0f, 4.0f,
1112
1113 2.0f, 4.0f, -1.0f,
1114 0.4f, 2.0f, 4.0f,
1115
1116 0.0f, 4.0f, 2.0f,
1117 8.4f, 1.0f, -1.0f,
1118
1119 0.0f, 0.0f, 2.0f,
1120 0.9f, 2.0f, 7.0f,
1121 }));
1122
1123 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1124 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1125 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1126
1127 armnn::AdditionQueueDescriptor data;
1128 armnn::WorkloadInfo info;
1129 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1130 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1131 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1132
1133 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1134
1135 inputHandle1->Allocate();
1136 inputHandle2->Allocate();
1137 outputHandle->Allocate();
1138
1139 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1140 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1141
1142 workload->Execute();
1143
1144 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1145
1146 return ret;
1147}
1148
// Shared implementation for the broadcast Addition tests: adds a [1,3,2,1]
// tensor to a [1,1,2,3] tensor, broadcasting both to the [1,3,2,3] output.
// qScale/qOffset are applied to all tensor infos for quantized T; they are
// ignored for float (IsQuantizedType<float>() is false).
// NOTE(review): memoryManager is unused in this test; handles come straight
// from the workload factory.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Apply the same quantization parameters everywhere for quantized types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected output: each input1 value broadcast across input2's last axis.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor: two inputs, one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate device memory before copying the input data in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    // Read the result back for comparison against outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1226
// Shared implementation for the single-element broadcast Addition tests:
// adds a scalar-shaped [1,1,1,1] tensor to every element of a [1,3,2,3]
// tensor. qScale/qOffset are applied for quantized T; ignored for float.
// NOTE(review): memoryManager is unused in this test; handles come straight
// from the workload factory.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Apply the same quantization parameters everywhere for quantized types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single broadcast value added to every element of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected output: input1 with 0.5 added to each element.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the queue descriptor: two inputs, one output.
    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate device memory before copying the input data in.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    // Read the result back for comparison against outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001300LayerTestResult<float, 4> AdditionBroadcastTest(
1301 armnn::IWorkloadFactory& workloadFactory,
1302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001303{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001304 return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001305}
1306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001307LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1308 armnn::IWorkloadFactory& workloadFactory,
1309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001310{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001311 return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001312}
1313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001314LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1315 armnn::IWorkloadFactory& workloadFactory,
1316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001317{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001318 return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001319}
1320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001321LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1322 armnn::IWorkloadFactory& workloadFactory,
1323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001325 return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001326}
1327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001328LayerTestResult<float,4> CompareAdditionTest(
1329 armnn::IWorkloadFactory& workloadFactory,
1330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1331 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001332{
1333 unsigned int batchSize = 4;
1334 unsigned int channels = 1;
1335 unsigned int height = 2;
1336 unsigned int width = 3;
1337
1338 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1339 armnn::TensorInfo outputTensorInfo;
1340
1341 unsigned int shape[] = {batchSize, channels, height, width};
1342
1343 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1344 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1345 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1346
1347 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1348 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1349
1350 LayerTestResult<float,4> ret(outputTensorInfo);
1351
1352 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1353 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1354 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1355
1356 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1357 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1358 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1359
1360 armnn::AdditionQueueDescriptor data;
1361 armnn::WorkloadInfo info;
1362 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1363 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1364 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1365
1366 armnn::AdditionQueueDescriptor refData = data;
1367 armnn::WorkloadInfo refInfo = info;
1368 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1369 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1370 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1371
1372 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1373 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1374
1375 inputHandle1->Allocate();
1376 inputHandle2->Allocate();
1377 outputHandle->Allocate();
1378 inputHandle1Ref->Allocate();
1379 inputHandle2Ref->Allocate();
1380 outputHandleRef->Allocate();
1381
1382 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1383 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1384 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1385 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1386
1387 workload->Execute();
1388 workloadRef->Execute();
1389
1390 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1391 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1392
1393 return ret;
1394}
1395
surmeh01bceff2f2018-03-29 16:29:27 +01001396namespace {
David Beck5cd01f32018-09-12 16:00:08 +01001397template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001398LayerTestResult<T, 4> DivisionTestHelper(
1399 armnn::IWorkloadFactory& workloadFactory,
1400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1401 const unsigned int shape0[4],
1402 const std::vector<T>& values0,
1403 float scale0,
1404 int32_t offset0,
1405 const unsigned int shape1[4],
1406 const std::vector<T> & values1,
1407 float scale1,
1408 int32_t offset1,
1409 const unsigned int outShape[4],
1410 const std::vector<T> & outValues,
1411 float outScale,
1412 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001413{
1414 auto dataType = (std::is_same<T, uint8_t>::value ?
1415 armnn::DataType::QuantisedAsymm8 :
1416 armnn::DataType::Float32);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001417
David Beck5cd01f32018-09-12 16:00:08 +01001418 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
1419 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
1420 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001421
David Beck5cd01f32018-09-12 16:00:08 +01001422 inputTensorInfo0.SetQuantizationScale(scale0);
1423 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001424
David Beck5cd01f32018-09-12 16:00:08 +01001425 inputTensorInfo1.SetQuantizationScale(scale1);
1426 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001427
David Beck5cd01f32018-09-12 16:00:08 +01001428 outputTensorInfo.SetQuantizationScale(outScale);
1429 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001430
David Beck5cd01f32018-09-12 16:00:08 +01001431 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1432 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001433
David Beck5cd01f32018-09-12 16:00:08 +01001434 LayerTestResult<T, 4> result(outputTensorInfo);
1435 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001436
David Beck5cd01f32018-09-12 16:00:08 +01001437 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1438 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1439 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001440
David Beck5cd01f32018-09-12 16:00:08 +01001441 armnn::DivisionQueueDescriptor data;
1442 armnn::WorkloadInfo info;
1443 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1444 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1445 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001446
David Beck5cd01f32018-09-12 16:00:08 +01001447 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001448
David Beck5cd01f32018-09-12 16:00:08 +01001449 inputHandle0->Allocate();
1450 inputHandle1->Allocate();
1451 outputHandle->Allocate();
1452
1453 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1454 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1455
David Beck5cd01f32018-09-12 16:00:08 +01001456 workload->Execute();
1457
1458 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1459
1460 return result;
1461}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001462} // anonymous namespace
1463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001464LayerTestResult<float,4> DivisionByZeroTest(
1465 armnn::IWorkloadFactory& workloadFactory,
1466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001467{
1468 const unsigned int width = 2;
1469 const unsigned int height = 2;
1470 const unsigned int channelCount = 2;
1471 const unsigned int batchSize = 2;
1472
1473 unsigned int shape[] = { batchSize, channelCount, height, width };
1474
1475 std::vector<float> input0({
1476 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1477 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1478
1479 std::vector<float> input1({
1480 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1481 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1482
1483 std::vector<float> output({
1484 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1485 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1486
David Beck5cd01f32018-09-12 16:00:08 +01001487 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001488 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001489 shape, input0, 1.0f, 0,
1490 shape, input1, 1.0f, 0,
1491 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001492}
1493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001494LayerTestResult<float,4> DivisionTest(
1495 armnn::IWorkloadFactory& workloadFactory,
1496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001497{
1498 const unsigned int width = 2;
1499 const unsigned int height = 2;
1500 const unsigned int channelCount = 2;
1501 const unsigned int batchSize = 2;
1502
1503 unsigned int shape[] = { batchSize, channelCount, height, width };
1504
1505 std::vector<float> input0({
1506 2, 2, 2, 2, 3, 3, 3, 3,
1507 4, 4, 4, 4, 5, 5, 5, 5 });
1508
1509 std::vector<float> input1({
1510 1, 1, 1, 1, 2, 2, 2, 2,
1511 4, 4, 4, 4, 4, 4, 4, 4 });
1512
1513 std::vector<float> output({
1514 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1515 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1516
David Beck5cd01f32018-09-12 16:00:08 +01001517
1518 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001519 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001520 shape, input0, 1.0f, 0,
1521 shape, input1, 1.0f, 0,
1522 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001523}
1524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001525LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1526 armnn::IWorkloadFactory& workloadFactory,
1527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001528{
1529 unsigned int shape0[] = { 1, 2, 2, 2 };
1530 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1531
1532 unsigned int shape1[] = { 1, 1, 1, 1 };
1533 std::vector<float> input1({ 2 });
1534
1535 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1536
David Beck5cd01f32018-09-12 16:00:08 +01001537
1538 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001539 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001540 shape0, input0, 1.0f, 0,
1541 shape1, input1, 1.0f, 0,
1542 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001543}
1544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001545LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1546 armnn::IWorkloadFactory& workloadFactory,
1547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001548{
1549 unsigned int shape0[] = { 1, 3, 3, 2 };
1550 std::vector<float> input0({
1551 1, 4, 3, 8, 5, 12,
1552 7, 16, 9, 20, 11, 24,
1553 13, 28, 15, 32, 17, 36});
1554
1555 unsigned int shape1[] = { 1, 1, 1, 2 };
1556 std::vector<float> input1({ 1, 2 });
1557
1558 std::vector<float> output({
1559 1, 2, 3, 4, 5, 6,
1560 7, 8, 9, 10, 11, 12,
1561 13, 14, 15, 16, 17, 18});
1562
David Beck5cd01f32018-09-12 16:00:08 +01001563 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001564 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001565 shape0, input0, 1.0f, 0,
1566 shape1, input1, 1.0f, 0,
1567 shape0, output, 1.0f, 0);
1568}
1569
1570
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001571LayerTestResult<uint8_t,4> DivisionUint8Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001574{
1575 const unsigned int width = 2;
1576 const unsigned int height = 2;
1577 const unsigned int channelCount = 2;
1578 const unsigned int batchSize = 2;
1579
1580 unsigned int shape[] = { batchSize, channelCount, height, width };
1581
1582 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1583 4, 4, 4, 4, 5, 5, 5, 5 });
1584
1585 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1586 4, 4, 4, 4, 4, 4, 4, 4 });
1587
1588 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1589 4, 4, 4, 4, 5, 5, 5, 5});
1590
1591
1592 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001593 memoryManager,
1594 shape, input0, 1.0f, 0,
1595 shape, input1, 1.0f, 0,
1596 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001597}
1598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001599LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1600 armnn::IWorkloadFactory& workloadFactory,
1601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001602{
1603 unsigned int shape0[] = { 1, 2, 2, 2 };
1604 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1605
1606 unsigned int shape1[] = { 1, 1, 1, 1 };
1607 std::vector<uint8_t> input1({ 2 });
1608
1609 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1610
1611 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001612 memoryManager,
1613 shape0, input0, 1.0f, 0,
1614 shape1, input1, 1.0f, 0,
1615 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001616}
1617
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001618LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1619 armnn::IWorkloadFactory& workloadFactory,
1620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001621{
1622 unsigned int shape0[] = { 1, 3, 3, 2 };
1623 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1624 7, 16, 9, 20, 11, 24,
1625 13, 28, 15, 32, 17, 36});
1626
1627 unsigned int shape1[] = { 1, 1, 1, 2 };
1628 std::vector<uint8_t> input1({ 1, 2 });
1629
1630 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1631 7, 8, 9, 10, 11, 12,
1632 13, 14, 15, 16, 17, 18});
1633
1634 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001635 memoryManager,
1636 shape0, input0, 1.0f, 0,
1637 shape1, input1, 1.0f, 0,
1638 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001639}
1640
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001641template<typename DescriptorType>
1642std::unique_ptr<armnn::IWorkload> CreateWorkload(
1643 const armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::WorkloadInfo& info,
1645 const DescriptorType& descriptor)
1646{
1647 return CreateWorkload(workloadFactory, info, descriptor);
1648};
1649
1650template<>
1651std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
1652 const armnn::IWorkloadFactory& workloadFactory,
1653 const armnn::WorkloadInfo& info,
1654 const armnn::MaximumQueueDescriptor& descriptor)
1655{
1656 return workloadFactory.CreateMaximum(descriptor, info);
1657}
1658
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001659template<>
1660std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
1661 const armnn::IWorkloadFactory& workloadFactory,
1662 const armnn::WorkloadInfo& info,
1663 const armnn::MinimumQueueDescriptor& descriptor)
1664{
1665 return workloadFactory.CreateMinimum(descriptor, info);
1666}
1667
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001668template<>
1669std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
1670 const armnn::IWorkloadFactory& workloadFactory,
1671 const armnn::WorkloadInfo& info,
1672 const armnn::EqualQueueDescriptor& descriptor)
1673{
1674 return workloadFactory.CreateEqual(descriptor, info);
1675}
1676
FrancisMurtagh878f0232018-12-19 10:56:15 +00001677template<>
1678std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
1679 const armnn::IWorkloadFactory& workloadFactory,
1680 const armnn::WorkloadInfo& info,
1681 const armnn::GreaterQueueDescriptor& descriptor)
1682{
1683 return workloadFactory.CreateGreater(descriptor, info);
1684}
1685
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001686namespace {
1687 template <typename Descriptor, typename dataType>
1688 LayerTestResult<dataType, 4> ElementwiseTestHelper
1689 (armnn::IWorkloadFactory & workloadFactory,
1690 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
1691 const unsigned int shape0[4], std::vector<dataType> values0,
1692 const unsigned int shape1[4], std::vector<dataType> values1,
1693 const unsigned int outShape[4], std::vector<dataType> outValues,
1694 float qScale = 0.0f, int qOffset = 0)
1695 {
1696 const size_t dimensionCount = 4;
1697 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
1698 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
1699 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};
1700
1701 auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
1702 auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);
1703
1704 if (armnn::IsQuantizedType<dataType>())
1705 {
1706 inputTensorInfo0.SetQuantizationScale(qScale);
1707 inputTensorInfo0.SetQuantizationOffset(qOffset);
1708
1709 inputTensorInfo1.SetQuantizationScale(qScale);
1710 inputTensorInfo1.SetQuantizationOffset(qOffset);
1711
1712 outputTensorInfo.SetQuantizationScale(qScale);
1713 outputTensorInfo.SetQuantizationOffset(qOffset);
1714 }
1715
1716 LayerTestResult<dataType,4> ret(outputTensorInfo);
1717
1718 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1719 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1720 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1721
1722 Descriptor data;
1723 armnn::WorkloadInfo info;
1724 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1725 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1726 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1727 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
1728
1729 inputHandle0->Allocate();
1730 inputHandle1->Allocate();
1731 outputHandle->Allocate();
1732
1733 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1734 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1735
1736 ExecuteWorkload(*workload, memoryManager);
1737
1738 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1739
1740 ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
1741 return ret;
1742 }
1743}
1744
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001745LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1747{
1748 const unsigned int width = 2;
1749 const unsigned int height = 2;
1750 const unsigned int channelCount = 2;
1751 const unsigned int batchSize = 2;
1752
1753 unsigned int shape[] = { batchSize, channelCount, height, width };
1754
1755 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1756 3, 3, 3, 3, 4, 4, 4, 4 });
1757
1758 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
1759 5, 5, 5, 5, 4, 4, 4, 4 });
1760
1761 std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
1762 0, 0, 0, 0, 1, 1, 1, 1 });
1763
1764 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1765 (workloadFactory,
1766 memoryManager,
1767 shape,
1768 input0,
1769 shape,
1770 input1,
1771 shape,
1772 output);
1773}
1774
1775LayerTestResult<float, 4> EqualBroadcast1ElementTest(
1776 armnn::IWorkloadFactory& workloadFactory,
1777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1778{
1779 unsigned int shape0[] = { 1, 2, 2, 2 };
1780 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1781
1782 unsigned int shape1[] = { 1, 1, 1, 1 };
1783 std::vector<float> input1({ 1 });
1784
1785 std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
1786
1787 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1788 (workloadFactory,
1789 memoryManager,
1790 shape0,
1791 input0,
1792 shape1,
1793 input1,
1794 shape0,
1795 output);
1796}
1797
1798LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
1799 armnn::IWorkloadFactory& workloadFactory,
1800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1801{
1802 const unsigned int shape0[] = { 1, 2, 2, 3 };
1803 const unsigned int shape1[] = { 1, 1, 1, 3 };
1804
1805 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1806 7, 8, 9, 10, 11, 12 });
1807
1808 std::vector<float> input1({ 1, 2, 3});
1809
1810 std::vector<float> output({ 1, 1, 1, 0, 0, 0,
1811 0, 0, 0, 0, 0, 0 });
1812
1813 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1814 (workloadFactory,
1815 memoryManager,
1816 shape0,
1817 input0,
1818 shape1,
1819 input1,
1820 shape0,
1821 output);
1822}
1823
1824LayerTestResult<uint8_t, 4> EqualUint8Test(
1825 armnn::IWorkloadFactory& workloadFactory,
1826 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1827{
1828 unsigned int shape[] = { 2, 2, 2, 2 };
1829
1830 // See dequantized values to the right.
1831 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
1832 3, 3, 3, 3, 5, 5, 5, 5 });
1833
1834 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
1835 3, 3, 3, 3, 5, 5, 5, 5 });
1836
1837 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1838 1, 1, 1, 1, 0, 0, 0, 0 });
1839
1840 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
1841 (workloadFactory,
1842 memoryManager,
1843 shape,
1844 input0,
1845 shape,
1846 input1,
1847 shape,
1848 output,
1849 1.0f,
1850 0);
1851}
1852
1853LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
1854 armnn::IWorkloadFactory& workloadFactory,
1855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1856{
1857 const unsigned int shape0[] = { 1, 2, 2, 3 };
1858 const unsigned int shape1[] = { 1, 1, 1, 1 };
1859
1860 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1861 7, 8, 9, 10, 11, 12 });
1862
1863 std::vector<uint8_t> input1({ 1 });
1864
1865 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
1866 0, 0, 0, 0, 0, 0 });
1867
1868 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
1869 (workloadFactory,
1870 memoryManager,
1871 shape0,
1872 input0,
1873 shape1,
1874 input1,
1875 shape0,
1876 output,
1877 1.0f,
1878 0);
1879}
1880
1881LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
1882 armnn::IWorkloadFactory& workloadFactory,
1883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1884{
1885 const unsigned int shape0[] = { 1, 2, 2, 3 };
1886 const unsigned int shape1[] = { 1, 1, 1, 3 };
1887
1888 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1889 7, 8, 9, 10, 11, 12 });
1890
1891 std::vector<uint8_t> input1({ 1, 1, 3});
1892
1893 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
1894 0, 0, 0, 0, 0, 0 });
1895
1896 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
1897 (workloadFactory,
1898 memoryManager,
1899 shape0,
1900 input0,
1901 shape1,
1902 input1,
1903 shape0,
1904 output,
1905 1.0f,
1906 0);
1907}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001908
FrancisMurtagh878f0232018-12-19 10:56:15 +00001909LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1910 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1911{
1912 const unsigned int width = 2;
1913 const unsigned int height = 2;
1914 const unsigned int channelCount = 2;
1915 const unsigned int batchSize = 2;
1916
1917 unsigned int shape[] = { batchSize, channelCount, height, width };
1918
1919 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1920 3, 3, 3, 3, 4, 4, 4, 4 });
1921
1922 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
1923 5, 5, 5, 5, 4, 4, 4, 4 });
1924
1925 std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1926 0, 0, 0, 0, 0, 0, 0, 0 });
1927
1928 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
1929 (workloadFactory,
1930 memoryManager,
1931 shape,
1932 input0,
1933 shape,
1934 input1,
1935 shape,
1936 output);
1937}
1938
1939LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
1940 armnn::IWorkloadFactory& workloadFactory,
1941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1942{
1943 unsigned int shape0[] = { 1, 2, 2, 2 };
1944 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1945
1946 unsigned int shape1[] = { 1, 1, 1, 1 };
1947 std::vector<float> input1({ 1 });
1948
1949 std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
1950
1951 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
1952 (workloadFactory,
1953 memoryManager,
1954 shape0,
1955 input0,
1956 shape1,
1957 input1,
1958 shape0,
1959 output);
1960}
1961
1962LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
1963 armnn::IWorkloadFactory& workloadFactory,
1964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1965{
1966 const unsigned int shape0[] = { 1, 2, 2, 3 };
1967 const unsigned int shape1[] = { 1, 1, 1, 3 };
1968
1969 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
1970 7, 8, 9, 10, 11, 12 });
1971
1972 std::vector<float> input1({ 1, 3, 2});
1973
1974 std::vector<float> output({ 0, 0, 1, 1, 1, 1,
1975 1, 1, 1, 1, 1, 1 });
1976
1977 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
1978 (workloadFactory,
1979 memoryManager,
1980 shape0,
1981 input0,
1982 shape1,
1983 input1,
1984 shape0,
1985 output);
1986}
1987
1988LayerTestResult<uint8_t, 4> GreaterUint8Test(
1989 armnn::IWorkloadFactory& workloadFactory,
1990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1991{
1992 unsigned int shape[] = { 2, 2, 2, 2 };
1993
1994 // See dequantized values to the right.
1995 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
1996 3, 3, 3, 3, 5, 5, 5, 5 });
1997
1998 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
1999 2, 2, 2, 2, 5, 5, 5, 5 });
2000
2001 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2002 1, 1, 1, 1, 0, 0, 0, 0 });
2003
2004 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
2005 (workloadFactory,
2006 memoryManager,
2007 shape,
2008 input0,
2009 shape,
2010 input1,
2011 shape,
2012 output,
2013 1.0f,
2014 0);
2015}
2016
2017LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2018 armnn::IWorkloadFactory& workloadFactory,
2019 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2020{
2021 const unsigned int shape0[] = { 1, 2, 2, 3 };
2022 const unsigned int shape1[] = { 1, 1, 1, 1 };
2023
2024 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2025 7, 8, 9, 10, 11, 12 });
2026
2027 std::vector<uint8_t> input1({ 1 });
2028
2029 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2030 1, 1, 1, 1, 1, 1 });
2031
2032 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
2033 (workloadFactory,
2034 memoryManager,
2035 shape0,
2036 input0,
2037 shape1,
2038 input1,
2039 shape0,
2040 output,
2041 1.0f,
2042 0);
2043}
2044
2045LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2046 armnn::IWorkloadFactory& workloadFactory,
2047 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2048{
2049 const unsigned int shape0[] = { 1, 2, 2, 3 };
2050 const unsigned int shape1[] = { 1, 1, 1, 3 };
2051
2052 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2053 7, 8, 9, 10, 11, 12 });
2054
2055 std::vector<uint8_t> input1({ 1, 1, 3});
2056
2057 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2058 1, 1, 1, 1, 1, 1 });
2059
2060 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
2061 (workloadFactory,
2062 memoryManager,
2063 shape0,
2064 input0,
2065 shape1,
2066 input1,
2067 shape0,
2068 output,
2069 1.0f,
2070 0);
2071}
2072
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002073LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2075{
2076 const unsigned int width = 2;
2077 const unsigned int height = 2;
2078 const unsigned int channelCount = 2;
2079 const unsigned int batchSize = 2;
2080
2081 unsigned int shape[] = { batchSize, channelCount, height, width };
2082
2083 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2084 3, 3, 3, 3, 4, 4, 4, 4 });
2085
2086 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2087 4, 4, 4, 4, 5, 5, 5, 5 });
2088
2089 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2090 4, 4, 4, 4, 5, 5, 5, 5 });
2091
2092 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
2093 (workloadFactory,
2094 memoryManager,
2095 shape,
2096 input0,
2097 shape,
2098 input1,
2099 shape,
2100 output);
2101}
2102
2103LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2104 armnn::IWorkloadFactory& workloadFactory,
2105 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2106{
2107 unsigned int shape0[] = { 1, 2, 2, 2 };
2108 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2109
2110 unsigned int shape1[] = { 1, 1, 1, 1 };
2111 std::vector<float> input1({ 2 });
2112
2113 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2114
2115 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
2116 (workloadFactory,
2117 memoryManager,
2118 shape0,
2119 input0,
2120 shape1,
2121 input1,
2122 shape0,
2123 output);
2124}
2125
2126LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2127 armnn::IWorkloadFactory& workloadFactory,
2128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2129{
2130 const unsigned int shape0[] = { 1, 2, 2, 3 };
2131 const unsigned int shape1[] = { 1, 1, 1, 3 };
2132
2133 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2134 7, 8, 9, 10, 11, 12 });
2135
2136 std::vector<float> input1({ 1, 2, 3});
2137
2138 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
2139 7, 8, 9, 10, 11, 12 });
2140
2141 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
2142 (workloadFactory,
2143 memoryManager,
2144 shape0,
2145 input0,
2146 shape1,
2147 input1,
2148 shape0,
2149 output);
2150}
2151
2152LayerTestResult<uint8_t, 4> MaximumUint8Test(
2153 armnn::IWorkloadFactory& workloadFactory,
2154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2155{
2156 unsigned int shape[] = { 2, 2, 2, 2 };
2157
2158 // See dequantized values to the right.
2159 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2160 3, 3, 3, 3, 4, 4, 4, 4 });
2161
2162 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2163 4, 4, 4, 4, 5, 5, 5, 5 });
2164
2165 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2166 4, 4, 4, 4, 5, 5, 5, 5 });
2167
2168 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
2169 (workloadFactory,
2170 memoryManager,
2171 shape,
2172 input0,
2173 shape,
2174 input1,
2175 shape,
2176 output,
2177 1.0f,
2178 0);
2179}
2180
2181LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2182 armnn::IWorkloadFactory& workloadFactory,
2183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2184{
2185 const unsigned int shape0[] = { 1, 2, 2, 3 };
2186 const unsigned int shape1[] = { 1, 1, 1, 1 };
2187
2188 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2189 7, 8, 9, 10, 11, 12 });
2190
2191 std::vector<uint8_t> input1({2});
2192
2193 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2194 7, 8, 9, 10, 11, 12 });
2195
2196 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
2197 (workloadFactory,
2198 memoryManager,
2199 shape0,
2200 input0,
2201 shape1,
2202 input1,
2203 shape0,
2204 output,
2205 1.0f,
2206 0);
2207}
2208
2209LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2210 armnn::IWorkloadFactory& workloadFactory,
2211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2212{
2213 const unsigned int shape0[] = { 1, 2, 2, 3 };
2214 const unsigned int shape1[] = { 1, 1, 1, 3 };
2215
2216 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2217 7, 8, 9, 10, 11, 12 });
2218
2219 std::vector<uint8_t> input1({ 1, 10, 3});
2220
2221 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2222 7, 10, 9, 10, 11, 12 });
2223
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002224 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002225 (workloadFactory,
2226 memoryManager,
2227 shape0,
2228 input0,
2229 shape1,
2230 input1,
2231 shape0,
2232 output,
2233 1.0f,
2234 0);
2235}
2236
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002237LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2238 armnn::IWorkloadFactory& workloadFactory,
2239 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2240{
2241 unsigned int shape0[] = { 1, 2, 2, 2 };
2242 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2243
2244 unsigned int shape1[] = { 1, 1, 1, 1 };
2245 std::vector<float> input1({ 2 });
2246
2247 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2248
2249 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
2250 memoryManager,
2251 shape0,
2252 input0,
2253 shape1,
2254 input1,
2255 shape0,
2256 output);
2257}
2258
2259
2260LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2261 armnn::IWorkloadFactory& workloadFactory,
2262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2263{
2264 unsigned int shape0[] = { 1, 2, 2, 2 };
2265 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2266
2267 unsigned int shape1[] = { 1, 1, 1, 1 };
2268 std::vector<float> input1({ 5 });
2269
2270 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2271
2272 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
2273 memoryManager,
2274 shape0,
2275 input0,
2276 shape1,
2277 input1,
2278 shape0,
2279 output);
2280}
2281
2282LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2283 armnn::IWorkloadFactory & workloadFactory,
2284 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2285{
2286 const unsigned int shape0[] = { 1, 2, 2, 3 };
2287 const unsigned int shape1[] = { 1, 1, 1, 3 };
2288
2289 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2290 7, 1, 2, 3, 4, 5 });
2291
2292 std::vector<uint8_t> input1({ 1, 2, 3});
2293
2294 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2295 1, 1, 2, 1, 2, 3 });
2296
2297 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
2298 memoryManager,
2299 shape0,
2300 input0,
2301 shape1,
2302 input1,
2303 shape0,
2304 output,
2305 1.0f,
2306 0);
2307}
2308
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002309namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002310LayerTestResult<float,4> MultiplicationTestHelper(
2311 armnn::IWorkloadFactory& workloadFactory,
2312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2313 const unsigned int shape0[4],
2314 const std::vector<float> & values0,
2315 const unsigned int shape1[4],
2316 const std::vector<float> & values1,
2317 const unsigned int outShape[4],
2318 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00002319{
surmeh01bceff2f2018-03-29 16:29:27 +01002320 const size_t dimensionCount = 4;
2321 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
2322 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
2323 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00002324
surmeh01bceff2f2018-03-29 16:29:27 +01002325 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
2326 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00002327
2328 LayerTestResult<float,4> ret(outputTensorInfo);
2329
2330 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2331 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2332 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2333
2334 armnn::MultiplicationQueueDescriptor data;
2335 armnn::WorkloadInfo info;
2336 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2337 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2338 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2339
2340 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
2341
2342 inputHandle0->Allocate();
2343 inputHandle1->Allocate();
2344 outputHandle->Allocate();
2345
2346 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2347 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2348
2349 workload->Execute();
2350
2351 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2352
surmeh01bceff2f2018-03-29 16:29:27 +01002353 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00002354 return ret;
2355}
surmeh01bceff2f2018-03-29 16:29:27 +01002356} // anonymous namespace
2357
2358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002359LayerTestResult<float,4> MultiplicationTest(
2360 armnn::IWorkloadFactory& workloadFactory,
2361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002362{
2363 const unsigned int width = 2;
2364 const unsigned int height = 2;
2365 const unsigned int channelCount = 2;
2366 const unsigned int batchSize = 2;
2367
2368 unsigned int shape[] = { batchSize, channelCount, height, width };
2369
2370 std::vector<float> input0({
2371 1, 1, 1, 1, 2, 2, 2, 2,
2372 3, 3, 3, 3, 4, 4, 4, 4 });
2373
2374 std::vector<float> input1({
2375 2, 2, 2, 2, 3, 3, 3, 3,
2376 4, 4, 4, 4, 5, 5, 5, 5 });
2377
2378 std::vector<float> output({
2379 2, 2, 2, 2, 6, 6, 6, 6,
2380 12, 12, 12, 12, 20, 20, 20, 20 });
2381
2382 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002383 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002384 shape,
2385 input0,
2386 shape,
2387 input1,
2388 shape,
2389 output);
2390}
2391
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002392LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2393 armnn::IWorkloadFactory& workloadFactory,
2394 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002395{
2396 unsigned int shape0[] = { 1, 2, 2, 2 };
2397 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2398
2399 unsigned int shape1[] = { 1, 1, 1, 1 };
2400 std::vector<float> input1({ 2 });
2401
2402 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2403
2404 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002405 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002406 shape0,
2407 input0,
2408 shape1,
2409 input1,
2410 shape0,
2411 output);
2412}
2413
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002414LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2415 armnn::IWorkloadFactory& workloadFactory,
2416 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002417{
2418 unsigned int shape0[] = { 1, 3, 3, 2 };
2419 std::vector<float> input0({
2420 1, 2, 3, 4, 5, 6,
2421 7, 8, 9, 10, 11, 12,
2422 13, 14, 15, 16, 17, 18});
2423
2424 unsigned int shape1[] = { 1, 1, 1, 2 };
2425 std::vector<float> input1({ 1, 2 });
2426
2427 std::vector<float> output({
2428 1, 4, 3, 8, 5, 12,
2429 7, 16, 9, 20, 11, 24,
2430 13, 28, 15, 32, 17, 36});
2431
2432 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002433 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002434 shape0,
2435 input0,
2436 shape1,
2437 input1,
2438 shape0,
2439 output);
2440}
telsoa014fcda012018-03-09 14:13:49 +00002441
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002442LayerTestResult<float,4> CompareMultiplicationTest(
2443 armnn::IWorkloadFactory& workloadFactory,
2444 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2445 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002446{
2447 const unsigned int width = 16;
2448 const unsigned int height = 32;
2449 const unsigned int channelCount = 2;
2450 const unsigned int batchSize = 5;
2451
2452 armnn::TensorInfo inputTensorInfo0;
2453 armnn::TensorInfo inputTensorInfo1;
2454 armnn::TensorInfo outputTensorInfo;
2455
2456 constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
2457
2458 inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2459 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2460 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2461
2462 LayerTestResult<float,4> comparisonResult(outputTensorInfo);
2463
2464 auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
2465 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
2466
2467 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
2468 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
2469 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2470
2471 std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
2472 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
2473 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2474
2475 armnn::MultiplicationQueueDescriptor data;
2476 armnn::WorkloadInfo info;
2477 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
2478 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2479 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2480
2481 armnn::MultiplicationQueueDescriptor refData = data;
2482 armnn::WorkloadInfo refInfo = info;
2483 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
2484 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
2485 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2486
2487 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
2488 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
2489
2490 inputHandle0->Allocate();
2491 inputHandle1->Allocate();
2492 outputHandle->Allocate();
2493 inputHandle0Ref->Allocate();
2494 inputHandle1Ref->Allocate();
2495 outputHandleRef->Allocate();
2496
2497 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2498 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2499 CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
2500 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
2501
2502 workload->Execute();
2503 workloadRef->Execute();
2504
2505 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
2506 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
2507
2508 return comparisonResult;
2509}
2510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002511LayerTestResult<float,4> CompareBatchNormTest(
2512 armnn::IWorkloadFactory& workloadFactory,
2513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2514 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00002515{
2516 const unsigned int width = 2;
2517 const unsigned int height = 3;
2518 const unsigned int channels = 5;
2519 const unsigned int batchSize = 3;
2520
2521 armnn::TensorInfo inputTensorInfo;
2522 armnn::TensorInfo outputTensorInfo;
2523 armnn::TensorInfo tensorInfo;
2524
2525 constexpr unsigned int shape[] = {batchSize, channels, height, width};
2526 constexpr unsigned int tensorShape[] = {channels};
2527
2528 inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2529 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
2530 tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
2531
2532 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
2533
2534 auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
2535 auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
2536 auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
2537 auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
2538
2539 LayerTestResult<float,4> ret(outputTensorInfo);
2540
2541 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
2542 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2543
2544 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
2545 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
2546
2547 armnn::BatchNormalizationQueueDescriptor data;
2548 armnn::WorkloadInfo info;
2549 armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
2550 armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
2551 armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
2552 armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
2553
2554 AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
2555 AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
2556 AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
2557 AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
2558
2559 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2560 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2561 data.m_Mean = &meanTensor;
2562 data.m_Variance = &varianceTensor;
2563 data.m_Beta = &betaTensor;
2564 data.m_Gamma = &gammaTensor;
2565 data.m_Parameters.m_Eps = 0.01f;
2566
2567 armnn::BatchNormalizationQueueDescriptor refData = data;
2568 armnn::WorkloadInfo refInfo = info;
2569 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
2570 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2571
2572 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
2573 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
2574
2575 inputHandle->Allocate();
2576 outputHandle->Allocate();
2577 inputHandleRef->Allocate();
2578 outputHandleRef->Allocate();
2579
2580 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
2581 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
2582
2583 workload->Execute();
2584 workloadRef->Execute();
2585
2586 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2587 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
2588
2589 return ret;
2590}
2591
surmeh013537c2c2018-05-18 16:31:43 +01002592template<typename T>
2593void PermuteTensorData(
2594 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002596 const armnn::PermutationVector& mappings,
2597 armnn::TensorInfo & inputTensorInfo,
2598 const T * inputData,
2599 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00002600{
surmeh013537c2c2018-05-18 16:31:43 +01002601 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
2602 if (inputData == nullptr)
2603 {
2604 // Nullptr is an error in the test. By returning without doing the concatenation
2605 // I expect the caller to fail the test. It still makes sense to report this as
2606 // an assert for Debug builds.
2607 return;
2608 }
telsoa014fcda012018-03-09 14:13:49 +00002609
surmeh013537c2c2018-05-18 16:31:43 +01002610 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
2611
2612 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
2613 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2614
2615 armnn::PermuteQueueDescriptor queueDescriptor;
2616 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
2617 armnn::WorkloadInfo workloadInfo;
2618 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
2619 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2620
2621 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
2622
2623 inputHandle->Allocate();
2624 outputHandle->Allocate();
2625
2626 CopyDataToITensorHandle(inputHandle.get(), inputData);
2627
2628 workload->Execute();
2629
2630 outputData.resize(outputTensorInfo.GetNumElements());
2631 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
2632 inputTensorInfo = outputTensorInfo;
2633}
2634
2635armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
2636 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2637 unsigned int concatDim)
2638{
telsoa014fcda012018-03-09 14:13:49 +00002639 std::vector<armnn::TensorShape> shapes;
2640 shapes.reserve(inputTensorInfos.size());
2641 for (const armnn::TensorInfo& it: inputTensorInfos)
2642 {
2643 shapes.push_back(it.GetShape());
2644 }
surmeh013537c2c2018-05-18 16:31:43 +01002645
2646 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
2647 shapes.end(),
2648 concatDim);
2649}
2650
2651//
narpra015cdda352018-11-19 15:30:27 +00002652// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension
2653// In case of <4 dimensions we need to make sure that the concat dimensions are at least
2654// the 3rd slowest iterating one or the inner most dimension.
surmeh013537c2c2018-05-18 16:31:43 +01002655//
2656
2657bool NeedPermuteForConcat(
2658 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2659 unsigned int concatDim)
2660{
2661 // See note above. Additionally we expect the input shapes to have the
2662 // same number of dimensions.
2663 unsigned int nDimensions = 0;
2664
telsoa01c577f2c2018-08-31 09:22:23 +01002665 // Determine the number of dimensions as well as sanity check them
2666 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01002667 for (auto && tensorInfo : inputTensorInfos)
2668 {
2669 if (!nDimensions)
2670 {
2671 nDimensions = tensorInfo.GetShape().GetNumDimensions();
2672 }
2673 else
2674 {
2675 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
2676 "Input shapes must have the same number of dimensions");
2677 }
2678 }
2679
narpra015cdda352018-11-19 15:30:27 +00002680 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01002681}
2682
2683armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2684{
2685 unsigned int numDims = inputShape.GetNumDimensions();
2686 if (numDims >= 3)
2687 {
2688 // Nothing to do if the inputShape has at least 3 dimensions.
2689 return inputShape;
2690 }
2691
2692 std::vector<unsigned int> newDims(size_t(3), 1u);
2693 unsigned int expandedBy = 3 - numDims;
2694 for (unsigned int i=0; i<numDims; ++i)
2695 {
2696 newDims[expandedBy+i] = inputShape[i];
2697 }
2698 return armnn::TensorShape(3u, &newDims[0]);
2699}
2700
2701void Generate3dPermuteVectorForConcat(
2702 unsigned int numDimensions,
2703 unsigned int & concatDim,
2704 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2705{
2706 BOOST_ASSERT_MSG(numDimensions <= 3,
2707 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01002708 unsigned int expandedBy = 3 - numDimensions;
2709 unsigned int expandedConcatAxis = concatDim + expandedBy;
2710
2711 if (expandedConcatAxis == 2)
2712 {
2713 concatDim = 0;
2714 armnn::PermutationVector forwardPermutation({1, 2, 0});
2715 armnn::PermutationVector reversePermutation({2, 0, 1});
2716 permutations = std::make_pair(forwardPermutation, reversePermutation);
2717 }
2718 else if (expandedConcatAxis == 1)
2719 {
2720 concatDim = 0;
2721 armnn::PermutationVector forwardPermutation({2, 0, 1});
2722 armnn::PermutationVector reversePermutation({1, 2, 0});
2723 permutations = std::make_pair(forwardPermutation, reversePermutation);
2724 }
2725 else
2726 {
2727 BOOST_ASSERT(expandedConcatAxis == 0);
2728 concatDim = 0;
2729 }
2730}
2731
2732//
2733// Permute the input tensors so we can do a supported concatenation.
2734// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
2735// at the front. Finally this function tells what the output shape
2736// of the permuted concatenated tensor is going to be.
2737//
2738template <typename T>
2739void PermuteInputsForConcat(
2740 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002742 std::vector<armnn::TensorInfo> & inputTensorInfos,
2743 std::vector<T *> & inputData,
2744 std::vector<std::vector<T>> & inputDataStorage,
2745 armnn::PermutationVector & permuteVector,
2746 unsigned int & concatDim,
2747 armnn::TensorInfo & outputTensorInfo)
2748{
2749 BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
2750 "Expecting more than one tensor to be concatenated here");
2751
2752 unsigned int numDims = 0;
2753 unsigned int nthInput = 0;
2754 const armnn::PermutationVector identity({0, 1, 2});
2755
2756 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
2757 std::make_pair(identity, identity);
2758
2759 inputDataStorage.resize(inputData.size());
2760
2761 for (auto && tensorInfo : inputTensorInfos)
2762 {
2763 if (numDims == 0)
2764 {
2765 numDims = tensorInfo.GetShape().GetNumDimensions();
2766 Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
narpra015cdda352018-11-19 15:30:27 +00002767
telsoa01c577f2c2018-08-31 09:22:23 +01002768 // Store the reverese permutation.
surmeh013537c2c2018-05-18 16:31:43 +01002769 permuteVector = permutations.second;
2770 BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
2771 "Test logic error, we don't need permutation, so we shouldn't arrive here");
2772 }
2773 else
2774 {
2775 BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
2776 "All inputs must have the same number of dimensions");
2777 }
2778
2779 armnn::TensorInfo newTensorInfo = tensorInfo;
2780 newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
2781
2782 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002783 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002784 permutations.first,
2785 newTensorInfo,
2786 inputData[nthInput],
2787 inputDataStorage[nthInput]);
2788
2789 inputData[nthInput] = inputDataStorage[nthInput].data();
2790 inputTensorInfos[nthInput] = newTensorInfo;
2791
2792 ++nthInput;
2793 }
2794
2795 outputTensorInfo.SetShape(
2796 armnnUtils::Permuted(
2797 ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
2798 permutations.first));
2799}
2800
2801
2802//
2803// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01002804// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01002805// output.
2806//
2807template <typename T>
2808void PermuteOutputForConcat(
2809 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002810 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002811 const armnn::TensorInfo & tensorInfo,
2812 const armnn::PermutationVector & permuteVector,
2813 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
2814 T * data)
2815{
2816 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
2817 if (data == nullptr)
2818 {
2819 // Nullptr is an error in the test. By returning without doing the permutation
2820 // I expect the caller to fail the test. It still makes sense to report this as
2821 // an assert for Debug builds.
2822 return;
2823 }
2824
2825 armnn::TensorInfo resultTensorInfo = tensorInfo;
2826 std::vector<T> inputData(tensorInfo.GetNumElements());
2827 std::vector<T> outputData;
2828
2829 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
2830
2831 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002832 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002833 permuteVector,
2834 resultTensorInfo,
2835 &inputData[0],
2836 outputData);
2837
2838 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
2839}
2840
2841template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002842void Concatenate(
2843 armnn::IWorkloadFactory& workloadFactory,
2844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2845 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2846 std::initializer_list<T *> inputsOrig,
2847 const armnn::TensorInfo& outputTensorInfoOrig,
2848 T * output,
narpra015cdda352018-11-19 15:30:27 +00002849 unsigned int concatDim,
2850 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01002851{
2852 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2853 if (output == nullptr)
2854 {
2855 // Nullptr is an error in the test. By returning without doing the permutation
2856 // I expect the caller to fail the test. It still makes sense to report this as
2857 // an assert for Debug builds.
2858 return;
2859 }
2860
telsoa01c577f2c2018-08-31 09:22:23 +01002861 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002862 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2863 std::vector<T *> inputs = inputsOrig;
2864 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2865
2866 armnn::PermutationVector permuteVector{0, 1, 2};
2867
telsoa01c577f2c2018-08-31 09:22:23 +01002868 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002869 std::vector<std::vector<T>> tmpInputDataStorage;
2870
2871 const size_t inputCount = inputTensorInfos.size();
2872
2873 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2874
2875 if (needPermuteForConcat)
2876 {
2877 //
2878 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002879 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002880 //
2881 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002882 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002883 inputTensorInfos,
2884 inputs,
2885 tmpInputDataStorage,
2886 permuteVector,
2887 concatDim,
2888 outputTensorInfo);
2889 }
2890
narpra015cdda352018-11-19 15:30:27 +00002891 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00002892
2893 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2894 inputHandles.reserve(inputCount);
2895
narpra015cdda352018-11-19 15:30:27 +00002896 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2897
2898 armnn::MergerQueueDescriptor queueDescriptor;
2899 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
2900 queueDescriptor.m_Parameters = viewsDescriptor;
2901
2902 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00002903 {
narpra015cdda352018-11-19 15:30:27 +00002904 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2905 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2906 {
2907 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2908 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2909 }
telsoa014fcda012018-03-09 14:13:49 +00002910
narpra015cdda352018-11-19 15:30:27 +00002911 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00002912
narpra015cdda352018-11-19 15:30:27 +00002913 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2914 for (unsigned int i = 0; i < inputCount; ++i)
2915 {
2916 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
2917 std::unique_ptr<armnn::ITensorHandle> inputHandle =
2918 subTensorsSupported ?
2919 workloadFactory.CreateSubTensorHandle(*outputHandle,
2920 inputTensorInfo.GetShape(),
2921 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
2922 workloadFactory.CreateTensorHandle(inputTensorInfo);
2923
2924 inputHandles.emplace_back(std::move(inputHandle));
2925 }
2926
telsoa014fcda012018-03-09 14:13:49 +00002927 }
narpra015cdda352018-11-19 15:30:27 +00002928 else
2929 {
2930 for (unsigned int i = 0; i < inputCount; ++i)
2931 {
2932 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
2933 inputHandles.emplace_back(std::move(inputHandle));
2934 }
2935 }
telsoa014fcda012018-03-09 14:13:49 +00002936
2937 for (unsigned int i = 0; i < inputCount; ++i)
2938 {
surmeh013537c2c2018-05-18 16:31:43 +01002939 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002940 }
2941
2942 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2943
2944 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2945
2946 for (auto& inputHandle : inputHandles)
2947 {
2948 inputHandle->Allocate();
2949 }
2950
2951 outputHandle->Allocate();
2952
2953 unsigned int nextInputId = 0;
2954 for (auto& inputHandle : inputHandles)
2955 {
surmeh013537c2c2018-05-18 16:31:43 +01002956 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2957 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002958 }
2959
2960 workload->Execute();
2961
surmeh013537c2c2018-05-18 16:31:43 +01002962 if (needPermuteForConcat)
2963 {
2964 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002965 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002966 outputTensorInfo,
2967 permuteVector,
2968 std::move(outputHandle),
2969 output);
2970 }
2971 else
2972 {
2973 CopyDataFromITensorHandle(output, outputHandle.get());
2974 }
telsoa014fcda012018-03-09 14:13:49 +00002975}
2976
2977template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002978LayerTestResult<T, 1> Concatenation1dTestImpl(
2979 armnn::IWorkloadFactory& workloadFactory,
2980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2981 float qScale,
2982 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002983{
2984 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2985
2986 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2987 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2988 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2989
2990 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2991
2992 LayerTestResult<T, 1> result(outputTensorInfo);
2993
2994 std::vector<T> output;
2995 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002996 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00002997 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2998 { input0.data(), input1.data(), input2.data() },
2999 outputTensorInfo,
3000 output.data(),
3001 0,
3002 true);
telsoa014fcda012018-03-09 14:13:49 +00003003
3004 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3005 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3006 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3007 }));
3008
3009 return result;
3010}
3011
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003012LayerTestResult<float, 1> Concatenation1dTest(
3013 armnn::IWorkloadFactory& workloadFactory,
3014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003015{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003016 return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003017}
3018
// Concatenates three [2,3] tensors along 'dimension' into a tensor described
// by 'outputTensorInfo', applying the given quantization parameters.
// The caller is responsible for filling in result.outputExpected.
template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
3069
// Concatenation of three 2x3 tensors along dimension 0 (batch): the result is
// a 6x3 tensor with the inputs stacked one after another.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003104LayerTestResult<float, 2> Concatenation2dDim0Test(
3105 armnn::IWorkloadFactory& workloadFactory,
3106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003107{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003108 return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003109}
3110
// Concatenation of three 2x3 tensors along dimension 1: the result is a 2x9
// tensor with each batch row formed by the corresponding rows of the inputs.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
3132
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003133LayerTestResult<float, 2> Concatenation2dDim1Test(
3134 armnn::IWorkloadFactory& workloadFactory,
3135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003136{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003137 return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003138}
3139
// Concatenation along dimension 0 of three inputs with different batch counts
// (2x3, 3x3 and 1x3), producing a 6x3 output.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2 (was mislabelled "Batch 0")
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0 (only batch of this input; was mislabelled "Batch 1")
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003211LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3212 armnn::IWorkloadFactory& workloadFactory,
3213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003214{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003215 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003216}
3217
// Concatenation along dimension 1 of three inputs with different widths
// (2x3, 2x5 and 2x1), producing a 2x9 output.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3276
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003277LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3278 armnn::IWorkloadFactory& workloadFactory,
3279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003280{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003281 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003282}
3283
// Shared driver for the 3D concatenation tests: concatenates three fixed
// 2x3x2 inputs along 'dimension' and returns the computed output.
// The caller supplies the expected output shape via 'outputTensorInfo' and is
// responsible for filling in result.outputExpected.
// 'useSubtensor' is forwarded to Concatenate to request sub-tensor optimisation.
template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3371
3372template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003373LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
3374 armnn::IWorkloadFactory& workloadFactory,
3375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3376 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003377 int32_t qOffset)
3378{
3379 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
3380
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003381 LayerTestResult<T, 3> result =
narpra015cdda352018-11-19 15:30:27 +00003382 Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003383 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3384 // Batch 0, Channel 0
3385 1.0f, 2.0f,
3386
3387 // Batch 0, Channel 1
3388 3.0f, 4.0f,
3389
3390 // Batch 0, Channel 2
3391 5.0f, 6.0f,
3392
3393 // Batch 1, Channel 0
3394 19.0f, 20.0f,
3395
3396 // Batch 1, Channel 1
3397 21.0f, 22.0f,
3398
3399 // Batch 1, Channel 2
3400 23.0f, 24.0f,
3401
3402 // Batch 2, Channel 0
3403 7.0f, 8.0f,
3404
3405 // Batch 2, Channel 1
3406 9.0f, 10.0f,
3407
3408 // Batch 2, Channel 2
3409 11.0f, 12.0f,
3410
3411 // Batch 3, Channel 0
3412 25.0f, 26.0f,
3413
3414 // Batch 3, Channel 1
3415 27.0f, 28.0f,
3416
3417 // Batch 3, Channel 2
3418 29.0f, 30.0f,
3419
3420 // Batch 4, Channel 0
3421 13.0f, 14.0f,
3422
3423 // Batch 4, Channel 1
3424 15.0f, 16.0f,
3425
3426 // Batch 4, Channel 2
3427 17.0f, 18.0f,
3428
3429 // Batch 5, Channel 0
3430 31.0f, 32.0f,
3431
3432 // Batch 5, Channel 1
3433 33.0f, 34.0f,
3434
3435 // Batch 5, Channel 2
3436 35.0f, 36.0f
3437 }));
narpra015cdda352018-11-19 15:30:27 +00003438
telsoa014fcda012018-03-09 14:13:49 +00003439 return result;
3440}
3441
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003442LayerTestResult<float, 3> Concatenation3dDim0Test(
3443 armnn::IWorkloadFactory& workloadFactory,
3444 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003445{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003446 return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003447}
3448
// Concatenation of three 2x3x2 tensors along dimension 1 (channel): the
// result is a 2x9x2 tensor with each batch's channels laid end to end.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
3519
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003520LayerTestResult<float, 3> Concatenation3dDim1Test(
3521 armnn::IWorkloadFactory& workloadFactory,
3522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003523{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003524 return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003525}
3526
// Concatenation of three 2x3x2 tensors along dimension 2 (innermost): the
// result is a 2x3x6 tensor with corresponding rows interleaved side by side.
// 'useSubtensor' toggles the sub-tensor path in the shared driver.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
3562
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003563LayerTestResult<float, 3> Concatenation3dDim2Test(
3564 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3566 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003567{
narpra015cdda352018-11-19 15:30:27 +00003568 return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003569}
3570
3571template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003572LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
3573 armnn::IWorkloadFactory& workloadFactory,
3574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3575 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003576 int32_t qOffset)
3577{
3578 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
3579 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3580 // Batch 0, Channel 0
3581 1.0f, 2.0f,
3582
3583 // Batch 0, Channel 1
3584 3.0f, 4.0f,
3585
3586 // Batch 0, Channel 2
3587 5.0f, 6.0f,
3588
3589 // Batch 1, Channel 0
3590 19.0f, 20.0f,
3591
3592 // Batch 1, Channel 1
3593 21.0f, 22.0f,
3594
3595 // Batch 1, Channel 2
3596 23.0f, 24.0f
3597 }));
3598
3599 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
3600 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3601 // Batch 0, Channel 0
3602 7.0f, 8.0f,
3603
3604 // Batch 0, Channel 1
3605 9.0f, 10.0f,
3606
3607 // Batch 0, Channel 2
3608 11.0f, 12.0f,
3609 }));
3610
3611 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
3612 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3613 // Batch 0, Channel 0
3614 25.0f, 26.0f,
3615
3616 // Batch 0, Channel 1
3617 27.0f, 28.0f,
3618
3619 // Batch 0, Channel 2
3620 29.0f, 30.0f,
3621
3622 // Batch 1, Channel 0
3623 13.0f, 14.0f,
3624
3625 // Batch 1, Channel 1
3626 15.0f, 16.0f,
3627
3628 // Batch 1, Channel 2
3629 17.0f, 18.0f,
3630
3631 // Batch 2, Channel 0
3632 31.0f, 32.0f,
3633
3634 // Batch 2, Channel 1
3635 33.0f, 34.0f,
3636
3637 // Batch 2, Channel 2
3638 35.0f, 36.0f
3639 }));
3640
3641 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
3642 LayerTestResult<T, 3> result(outputTensorInfo);
3643
3644 std::vector<T> output;
3645 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003646 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003647 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3648 { input0.data(), input1.data(), input2.data() },
3649 outputTensorInfo,
3650 output.data(),
3651 0,
3652 true);
telsoa014fcda012018-03-09 14:13:49 +00003653
3654 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
3655 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3656 // Batch 0, Channel 0
3657 1.0f, 2.0f,
3658
3659 // Batch 0, Channel 1
3660 3.0f, 4.0f,
3661
3662 // Batch 0, Channel 2
3663 5.0f, 6.0f,
3664
3665 // Batch 1, Channel 0
3666 19.0f, 20.0f,
3667
3668 // Batch 1, Channel 1
3669 21.0f, 22.0f,
3670
3671 // Batch 1, Channel 2
3672 23.0f, 24.0f,
3673
3674 // Batch 2, Channel 0
3675 7.0f, 8.0f,
3676
3677 // Batch 2, Channel 1
3678 9.0f, 10.0f,
3679
3680 // Batch 2, Channel 2
3681 11.0f, 12.0f,
3682
3683 // Batch 3, Channel 0
3684 25.0f, 26.0f,
3685
3686 // Batch 3, Channel 1
3687 27.0f, 28.0f,
3688
3689 // Batch 3, Channel 2
3690 29.0f, 30.0f,
3691
3692 // Batch 4, Channel 0
3693 13.0f, 14.0f,
3694
3695 // Batch 4, Channel 1
3696 15.0f, 16.0f,
3697
3698 // Batch 4, Channel 2
3699 17.0f, 18.0f,
3700
3701 // Batch 5, Channel 0
3702 31.0f, 32.0f,
3703
3704 // Batch 5, Channel 1
3705 33.0f, 34.0f,
3706
3707 // Batch 5, Channel 2
3708 35.0f, 36.0f
3709 }));
3710
3711 return result;
3712}
3713
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003714LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3715 armnn::IWorkloadFactory& workloadFactory,
3716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003717{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003718 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003719}
3720
// Concatenation along dimension 1 of three inputs with different channel
// counts (2x3x2, 2x4x2 and 2x1x2), producing a 2x8x2 output.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
3851
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003852LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
3853 armnn::IWorkloadFactory& workloadFactory,
3854 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003855{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003856 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003857}
3858
// Concatenation along dimension 2 of three inputs with different innermost
// extents (2x3x2, 2x3x1 and 2x3x3), producing a 2x3x6 output.
// 'useSubtensor' toggles the sub-tensor path in Concatenate.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
3966
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003967LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
3968 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3970 bool useSubtensor)
3971{
3972 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
3973}
3974
// Shared driver for the 4D concatenation tests: concatenates three fixed
// 1x3x2x2 inputs along 'dimension' and returns the computed output.
// The caller supplies the expected output shape via 'outputTensorInfo' and is
// responsible for filling in result.outputExpected.
// 'useSubtensor' is forwarded to Concatenate to request sub-tensor optimisation.
template <typename T>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
4031
// Concatenation of three 1x3x2x2 tensors along dimension 0: the result is a
// 3x3x2x2 tensor with the inputs stacked one after another.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
4067
4068LayerTestResult<float, 4> Concatenation4dDim0Test(
4069 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004070 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004071{
narpra015cdda352018-11-19 15:30:27 +00004072 return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4073}
4074
// Concatenation of three 1x3x2x2 tensors along dimension 1: the result is a
// 1x9x2x2 tensor with the inputs' channel blocks laid end to end.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4111
4112LayerTestResult<float, 4> Concatenation4dDim1Test(
4113 armnn::IWorkloadFactory& workloadFactory,
4114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4115{
4116 return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4117}
4118
4119template <typename T>
4120LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
4121 armnn::IWorkloadFactory& workloadFactory,
4122 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4123 float qScale,
4124 int32_t qOffset)
4125{
4126 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());
4127
4128 LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
4129 true, qScale, qOffset);
4130 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4131 1.0f, 2.0f,
4132 3.0f, 4.0f,
4133 11.0f, 12.0f,
4134 13.0f, 14.0f,
4135 21.0f, 22.0f,
4136 23.0f, 24.0f,
4137
4138 5.0f, 6.0f,
4139 7.0f, 8.0f,
4140 15.0f, 16.0f,
4141 17.0f, 18.0f,
4142 25.0f, 26.0f,
4143 27.0f, 28.0f,
4144
4145 9.0f, 10.0f,
4146 11.0f, 12.0f,
4147 19.0f, 20.0f,
4148 21.0f, 22.0f,
4149 29.0f, 30.0f,
4150 31.0f, 32.0f
4151 }));
4152
4153 return result;
4154}
4155
4156LayerTestResult<float, 4> Concatenation4dDim2Test(
4157 armnn::IWorkloadFactory& workloadFactory,
4158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4159{
4160 return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4161}
4162
4163template <typename T>
4164LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
4165 armnn::IWorkloadFactory& workloadFactory,
4166 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4167 float qScale,
4168 int32_t qOffset,
4169 bool useSubtensor)
4170{
4171 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());
4172
4173 LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
4174 useSubtensor, qScale, qOffset);
4175 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4176 1.0f, 2.0f,
4177 11.0f, 12.0f,
4178 21.0f, 22.0f,
4179 3.0f, 4.0f,
4180 13.0f, 14.0f,
4181 23.0f, 24.0f,
4182
4183 5.0f, 6.0f,
4184 15.0f, 16.0f,
4185 25.0f, 26.0f,
4186 7.0f, 8.0f,
4187 17.0f, 18.0f,
4188 27.0f, 28.0f,
4189
4190 9.0f, 10.0f,
4191 19.0f, 20.0f,
4192 29.0f, 30.0f,
4193 11.0f, 12.0f,
4194 21.0f, 22.0f,
4195 31.0f, 32.0f
4196 }));
4197
4198 return result;
4199}
4200
4201LayerTestResult<float, 4> Concatenation4dDim3Test(
4202 armnn::IWorkloadFactory& workloadFactory,
4203 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4204 bool useSubtensor)
4205{
4206 return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
4207}
4208
4209template <typename T>
4210LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
4211 armnn::IWorkloadFactory& workloadFactory,
4212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4213 float qScale,
4214 int32_t qOffset)
4215{
4216 unsigned int dimension = 0;
4217 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
4218
4219 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4220 1.0f, 2.0f,
4221 3.0f, 4.0f,
4222 5.0f, 6.0f,
4223 7.0f, 8.0f,
4224 9.0f, 10.0f,
4225 11.0f, 12.0f
4226 }));
4227
4228 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());
4229
4230 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4231 11.0f, 12.0f,
4232 13.0f, 14.0f,
4233 15.0f, 16.0f,
4234 17.0f, 18.0f,
4235 19.0f, 20.0f,
4236 21.0f, 22.0f,
4237
4238 21.0f, 22.0f,
4239 23.0f, 24.0f,
4240 25.0f, 26.0f,
4241 27.0f, 28.0f,
4242 29.0f, 30.0f,
4243 31.0f, 32.0f
4244
4245 }));
4246
4247 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
4248
4249 LayerTestResult<T, 4> result(outputTensorInfo);
4250
4251 std::vector<T> output;
4252 output.resize(outputTensorInfo.GetNumElements());
4253 Concatenate<T>(workloadFactory,
4254 memoryManager,
4255 {inputTensorInfo0, inputTensorInfo1},
4256 {input0.data(), input1.data()},
4257 outputTensorInfo,
4258 output.data(),
4259 dimension,
4260 true);
4261
4262 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4263 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4264 1.0f, 2.0f,
4265 3.0f, 4.0f,
4266 5.0f, 6.0f,
4267 7.0f, 8.0f,
4268 9.0f, 10.0f,
4269 11.0f, 12.0f,
4270
4271 11.0f, 12.0f,
4272 13.0f, 14.0f,
4273 15.0f, 16.0f,
4274 17.0f, 18.0f,
4275 19.0f, 20.0f,
4276 21.0f, 22.0f,
4277
4278 21.0f, 22.0f,
4279 23.0f, 24.0f,
4280 25.0f, 26.0f,
4281 27.0f, 28.0f,
4282 29.0f, 30.0f,
4283 31.0f, 32.0f
4284 }));
4285
4286 return result;
4287}
4288
4289LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4290 armnn::IWorkloadFactory& workloadFactory,
4291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4292{
4293 return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4294}
4295
4296template <typename T>
4297LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4298 armnn::IWorkloadFactory& workloadFactory,
4299 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4300 float qScale,
4301 int32_t qOffset)
4302{
4303 unsigned int dimension = 1;
4304 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
4305
4306 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4307 1.0f, 2.0f,
4308 3.0f, 4.0f,
4309 5.0f, 6.0f,
4310 7.0f, 8.0f,
4311 9.0f, 10.0f,
4312 11.0f, 12.0f
4313 }));
4314
4315 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());
4316
4317 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4318 11.0f, 12.0f,
4319 13.0f, 14.0f,
4320 15.0f, 16.0f,
4321 17.0f, 18.0f,
4322
4323 }));
4324
4325 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());
4326
4327 LayerTestResult<T, 4> result(outputTensorInfo);
4328
4329 std::vector<T> output;
4330 output.resize(outputTensorInfo.GetNumElements());
4331 Concatenate<T>(workloadFactory,
4332 memoryManager,
4333 {inputTensorInfo0, inputTensorInfo1},
4334 {input0.data(), input1.data()},
4335 outputTensorInfo,
4336 output.data(),
4337 dimension,
4338 true);
4339
4340 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4341 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4342 1.0f, 2.0f,
4343 3.0f, 4.0f,
4344 5.0f, 6.0f,
4345 7.0f, 8.0f,
4346 9.0f, 10.0f,
4347 11.0f, 12.0f,
4348 11.0f, 12.0f,
4349 13.0f, 14.0f,
4350 15.0f, 16.0f,
4351 17.0f, 18.0f
4352 }));
4353
4354 return result;
4355}
4356
4357LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4358 armnn::IWorkloadFactory& workloadFactory,
4359 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4360{
4361 return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4362}
4363
4364template <typename T>
4365LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
4366 armnn::IWorkloadFactory& workloadFactory,
4367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4368 float qScale,
4369 int32_t qOffset)
4370{
4371 unsigned int dimension = 2;
4372 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
4373
4374 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4375 1.0f, 2.0f,
4376 3.0f, 4.0f,
4377 5.0f, 6.0f,
4378 7.0f, 8.0f,
4379 9.0f, 10.0f,
4380 11.0f, 12.0f
4381 }));
4382
4383 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());
4384
4385 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4386 11.0f, 12.0f,
4387 13.0f, 14.0f,
4388 15.0f, 16.0f,
4389 17.0f, 18.0f,
4390 19.0f, 20.0f,
4391 21.0f, 22.0f,
4392 23.0f, 24.0f,
4393 25.0f, 26.0f,
4394 27.0f, 28.0f
4395 }));
4396
4397 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());
4398
4399 LayerTestResult<T, 4> result(outputTensorInfo);
4400
4401 std::vector<T> output;
4402 output.resize(outputTensorInfo.GetNumElements());
4403 Concatenate<T>(workloadFactory,
4404 memoryManager,
4405 {inputTensorInfo0, inputTensorInfo1},
4406 {input0.data(), input1.data()},
4407 outputTensorInfo,
4408 output.data(),
4409 dimension,
4410 true);
4411
4412 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4413 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4414 1.0f, 2.0f,
4415 3.0f, 4.0f,
4416 11.0f, 12.0f,
4417 13.0f, 14.0f,
4418 15.0f, 16.0f,
4419
4420 5.0f, 6.0f,
4421 7.0f, 8.0f,
4422 17.0f, 18.0f,
4423 19.0f, 20.0f,
4424 21.0f, 22.0f,
4425
4426 9.0f, 10.0f,
4427 11.0f, 12.0f,
4428 23.0f, 24.0f,
4429 25.0f, 26.0f,
4430 27.0f, 28.0f
4431 }));
4432
4433 return result;
4434}
4435
4436LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4437 armnn::IWorkloadFactory& workloadFactory,
4438 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4439{
4440 return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4441}
4442
4443template <typename T>
4444LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
4445 armnn::IWorkloadFactory& workloadFactory,
4446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4447 float qScale,
4448 int32_t qOffset,
4449 bool useSubtensor)
4450{
4451 unsigned int dimension = 3;
4452 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
4453
4454 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4455 1.0f, 2.0f,
4456 3.0f, 4.0f,
4457 5.0f, 6.0f,
4458 7.0f, 8.0f,
4459 9.0f, 10.0f,
4460 11.0f, 12.0f
4461 }));
4462
4463 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());
4464
4465 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4466 11.0f, 12.0f, 13.0f,
4467 14.0f, 15.0f, 16.0f,
4468
4469 17.0f, 18.0f, 19.0f,
4470 20.0f, 21.0f, 22.0f,
4471
4472 23.0f, 24.0f, 25.0f,
4473 26.0f, 27.0f, 28.0f
4474 }));
4475
4476 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());
4477
4478 LayerTestResult<T, 4> result(outputTensorInfo);
4479
4480 std::vector<T> output;
4481 output.resize(outputTensorInfo.GetNumElements());
4482 Concatenate<T>(workloadFactory,
4483 memoryManager,
4484 {inputTensorInfo0, inputTensorInfo1},
4485 {input0.data(), input1.data()},
4486 outputTensorInfo,
4487 output.data(),
4488 dimension,
4489 useSubtensor);
4490
4491 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4492 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4493 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
4494 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
4495 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
4496 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
4497 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
4498 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
4499 }));
4500
4501 return result;
4502}
4503
4504LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4505 armnn::IWorkloadFactory& workloadFactory,
4506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4507 bool useSubtensor)
4508{
4509 return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004510}
4511
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004512LayerTestResult<float, 4> ResizeBilinearNopTest(
4513 armnn::IWorkloadFactory& workloadFactory,
4514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004515 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004516{
Nina Drozdd41b2592018-11-19 13:03:36 +00004517 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4518 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004519
James Conroy6b965822018-11-01 11:33:09 +00004520 std::vector<float> inputData({
4521 1.0f, 2.0f, 3.0f, 4.0f,
4522 2.0f, 3.0f, 4.0f, 5.0f,
4523 3.0f, 4.0f, 5.0f, 6.0f,
4524 4.0f, 5.0f, 6.0f, 7.0f,
4525
telsoa014fcda012018-03-09 14:13:49 +00004526 1.0f, 2.0f, 3.0f, 4.0f,
4527 2.0f, 3.0f, 4.0f, 5.0f,
4528 3.0f, 4.0f, 5.0f, 6.0f,
4529 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004530 });
4531
4532 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004533 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004534 {
4535 std::vector<float> tmp(inputData.size());
4536 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4537 inputData = tmp;
4538 }
4539
4540 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004541
4542 LayerTestResult<float, 4> result(outputTensorInfo);
4543 result.outputExpected = input;
4544
4545 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4546 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4547
4548 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004549 descriptor.m_Parameters.m_DataLayout = dataLayout;
4550 armnn::WorkloadInfo info;
4551 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4552 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4553
4554 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4555
4556 inputHandle->Allocate();
4557 outputHandle->Allocate();
4558 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4559
James Conroy074f3712018-10-03 09:32:03 +01004560 workload->Execute();
4561
4562 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4563 return result;
4564}
4565
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004566LayerTestResult<float, 4> SimpleResizeBilinearTest(
4567 armnn::IWorkloadFactory& workloadFactory,
4568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004569 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01004570{
Nina Drozdd41b2592018-11-19 13:03:36 +00004571 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
4572 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01004573
James Conroy6b965822018-11-01 11:33:09 +00004574 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004575 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00004576 200.0f, 250.0f,
4577
4578 250.0f, 200.0f,
4579 250.0f, 1.0f
4580 });
James Conroy074f3712018-10-03 09:32:03 +01004581
4582 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
4583 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00004584 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
4585 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
4586 // which we would expect if projecting the centre).
4587
4588 std::vector<float> outputData({
4589 1.0f,
4590
4591 250.0f
4592 });
4593
4594 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004595 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004596 {
4597 std::vector<float> tmp(inputData.size());
4598 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4599 inputData = tmp;
4600
4601 std::vector<float> tmp1(outputData.size());
4602 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4603 outputData = tmp1;
4604 }
4605
4606 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4607
James Conroy074f3712018-10-03 09:32:03 +01004608 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004609 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01004610
4611 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4612 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4613
4614 armnn::ResizeBilinearQueueDescriptor descriptor;
4615 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004616 armnn::WorkloadInfo info;
4617 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4618 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4619
4620 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4621
4622 inputHandle->Allocate();
4623 outputHandle->Allocate();
4624 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4625
4626 workload->Execute();
4627
4628 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4629 return result;
4630}
4631
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004632LayerTestResult<float, 4> ResizeBilinearSqMinTest(
4633 armnn::IWorkloadFactory& workloadFactory,
4634 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004635 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004636{
Nina Drozdd41b2592018-11-19 13:03:36 +00004637 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4638 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004639
James Conroy6b965822018-11-01 11:33:09 +00004640 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004641 1.0f, 2.0f, 3.0f, 4.0f,
4642 2.0f, 3.0f, 4.0f, 5.0f,
4643 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00004644 4.0f, 5.0f, 6.0f, 7.0f,
4645
4646 7.0f, 6.0f, 5.0f, 4.0f,
4647 6.0f, 5.0f, 4.0f, 3.0f,
4648 5.0f, 4.0f, 3.0f, 2.0f,
4649 4.0f, 3.0f, 2.0f, 1.0f
4650 });
4651
4652 std::vector<float> outputData({
4653 1.0f, 3.0f,
4654 3.0f, 5.0f,
4655
4656 7.0f, 5.0f,
4657 5.0f, 3.0f
4658 });
4659
4660 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004661 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004662 {
4663 std::vector<float> tmp(inputData.size());
4664 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4665 inputData = tmp;
4666
4667 std::vector<float> tmp1(outputData.size());
4668 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4669 outputData = tmp1;
4670 }
4671
4672 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004673
telsoa014fcda012018-03-09 14:13:49 +00004674 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004675 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004676
4677 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4678 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4679
4680 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004681 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004682 armnn::WorkloadInfo info;
4683 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4684 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4685
4686 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4687
4688 inputHandle->Allocate();
4689 outputHandle->Allocate();
4690 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4691
4692 workload->Execute();
4693
4694 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4695 return result;
4696}
4697
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004698LayerTestResult<float, 4> ResizeBilinearMinTest(
4699 armnn::IWorkloadFactory& workloadFactory,
4700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004701 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004702{
Nina Drozdd41b2592018-11-19 13:03:36 +00004703 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
4704 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004705
James Conroy6b965822018-11-01 11:33:09 +00004706 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004707 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
4708 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00004709 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
4710
4711 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
4712 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
4713 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
4714 });
4715
4716 std::vector<float> outputData({
4717 1.0f, 2.6666f, 6.00f,
4718 78.5f, 179.3333f, 401.00f,
4719
4720 987.0f, 454.6670f, 203.33f,
4721 48.5f, 22.3333f, 10.00f
4722 });
4723
4724 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004725 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004726 {
4727 std::vector<float> tmp(inputData.size());
4728 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4729 inputData = tmp;
4730
4731 std::vector<float> tmp1(outputData.size());
4732 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4733 outputData = tmp1;
4734 }
4735
4736 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004737
4738 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004739 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004740
4741 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4742 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4743
4744 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004745 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004746 armnn::WorkloadInfo info;
4747 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4748 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4749
4750 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4751
4752 inputHandle->Allocate();
4753 outputHandle->Allocate();
4754 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4755
4756 workload->Execute();
4757
4758 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4759 return result;
4760}
4761
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004762LayerTestResult<float, 4> ResizeBilinearMagTest(
4763 armnn::IWorkloadFactory& workloadFactory,
4764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004765 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004766{
Nina Drozdd41b2592018-11-19 13:03:36 +00004767 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
4768 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004769
James Conroy6b965822018-11-01 11:33:09 +00004770 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004771 1.0f, 2.0f,
4772 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004773 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00004774
James Conroy6b965822018-11-01 11:33:09 +00004775 233.0f, 144.0f,
4776 21.0f, 13.0f,
4777 2.0f, 1.0f
4778 });
4779
4780 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01004781 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
4782 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004783 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
4784
4785 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
4786 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
4787 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
4788 });
4789
4790 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004791 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004792 {
4793 std::vector<float> tmp(inputData.size());
4794 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4795 inputData = tmp;
4796
4797 std::vector<float> tmp1(outputData.size());
4798 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4799 outputData = tmp1;
4800 }
4801
4802 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4803
4804 LayerTestResult<float, 4> result(outputTensorInfo);
4805 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004806
4807 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4808 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4809
4810 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004811 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004812 armnn::WorkloadInfo info;
4813 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4814 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4815
4816 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4817
4818 inputHandle->Allocate();
4819 outputHandle->Allocate();
4820 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4821
4822 workload->Execute();
4823
4824 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4825 return result;
4826}
4827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004828LayerTestResult<float, 2> FakeQuantizationTest(
4829 armnn::IWorkloadFactory& workloadFactory,
4830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004831{
4832 constexpr unsigned int width = 2;
4833 constexpr unsigned int height = 3;
4834
4835 const armnn::TensorInfo tensorInfo({height, width },
4836 armnn::DataType::Float32);
4837 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4838 -10.0f, -5.0f,
4839 0.0f, 5.0f,
4840 10.0f, 10.0f
4841 }));
4842
4843 LayerTestResult<float, 2> ret(tensorInfo);
4844
4845 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4846
4847 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4848
4849 armnn::FakeQuantizationQueueDescriptor data;
4850 armnn::WorkloadInfo info;
4851
4852 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
4853 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
4854 float min = -10.f;
4855 float max = 10.f;
4856
4857 data.m_Parameters.m_Min = min;
4858 data.m_Parameters.m_Max = max;
4859
4860 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
4861 armnn::FakeQuantizationQueueDescriptor refData = data;
4862 armnn::WorkloadInfo refInfo = info;
4863 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
4864
4865 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
4866
4867 inputHandle->Allocate();
4868 outputHandle->Allocate();
4869
4870 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
4871
4872 workload->Execute();
4873
4874 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
4875
4876 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4877 0.0f, 63.0f,
4878 128.0f, 191.0f,
4879 255.0f, 255.0f
4880 }));
4881 return ret;
4882}
4883
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004884namespace
4885{
4886
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004887LayerTestResult<float, 4> L2NormalizationTestImpl(
4888 armnn::IWorkloadFactory& workloadFactory,
4889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4890 const armnn::TensorShape& inputOutputTensorShape,
4891 const std::vector<float>& inputValues,
4892 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00004893 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004894{
4895 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4896 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4897
jimfly013aab7c32018-11-12 13:32:08 +00004898 // at this point if we require it permute the input data
4899 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
4900 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004901 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004902 {
4903 std::vector<float> tmp(inputData.size());
4904 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4905 inputData = tmp;
4906 }
4907
4908 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004909
4910 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00004911 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004912 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004913 {
4914 std::vector<float> tmp(expectedOutputData.size());
4915 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
4916 expectedOutputData = tmp;
4917 }
4918 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004919
4920 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4921 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4922
4923 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00004924 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004925 armnn::WorkloadInfo info;
4926
4927 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4928 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4929
4930 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
4931
4932 inputHandle->Allocate();
4933 outputHandle->Allocate();
4934
4935 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4936
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004937 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004938
4939 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4940
4941 return result;
4942}
4943
// Returns the reciprocal of the Euclidean (L2) norm of the given values,
// i.e. 1 / sqrt(sum of squares). Used to build expected normalization outputs.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float value : elements)
    {
        sumOfSquares += value * value;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
4950
4951} // anonymous namespace
4952
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004953template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004954LayerTestResult<T, 2> Pad2dTestCommon(
4955 armnn::IWorkloadFactory& workloadFactory,
4956 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4957 float qScale,
4958 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004959{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004960 const armnn::TensorShape inputShape{ 3, 3 };
4961 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004962
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004963 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4964 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004965
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004966 std::vector<T> inputValues(
4967 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004968 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004969 // Height (3) x Width (3)
4970 4, 8, 6,
4971 7, 4, 4,
4972 3, 2, 4
4973 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004974
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004975 std::vector<T> expectedOutputValues(
4976 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004977 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004978 0, 0, 0, 0, 0, 0, 0,
4979 0, 0, 0, 0, 0, 0, 0,
4980 0, 0, 4, 8, 6, 0, 0,
4981 0, 0, 7, 4, 4, 0, 0,
4982 0, 0, 3, 2, 4, 0, 0,
4983 0, 0, 0, 0, 0, 0, 0,
4984 0, 0, 0, 0, 0, 0, 0
4985 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004986
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004987 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004988
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004989 LayerTestResult<T, 2> result(outputTensorInfo);
4990 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004991
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004992 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4993 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004994
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004995 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004996
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004997 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4998 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
4999 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005000
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005001 descriptor.m_Parameters.m_PadList = PadList;
5002 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005003
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005004 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5005 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005006
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005007 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005008
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005009 inputHandle->Allocate();
5010 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005011
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005012 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005013
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005014 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005015
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005016 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005017
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005018 return result;
5019}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005020
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005021template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005022LayerTestResult<T, 3> Pad3dTestCommon(
5023 armnn::IWorkloadFactory& workloadFactory,
5024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5025 float qScale,
5026 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005027{
5028 const armnn::TensorShape inputShape{ 2, 2, 2 };
5029 const armnn::TensorShape outputShape{ 3, 5, 6 };
5030
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005031 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
5032 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005033
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005034 std::vector<T> inputValues(
5035 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005036 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005037 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005038 0, 4,
5039 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005040
5041 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005042 6, 1,
5043 5, 2
5044 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005045
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005046 std::vector<T> expectedOutputValues(
5047 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005048 {
5049
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005050 0, 0, 0, 0, 0, 0,
5051 0, 0, 0, 0, 0, 0,
5052 0, 0, 0, 4, 0, 0,
5053 0, 0, 2, 5, 0, 0,
5054 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005055
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005056 0, 0, 0, 0, 0, 0,
5057 0, 0, 0, 0, 0, 0,
5058 0, 0, 6, 1, 0, 0,
5059 0, 0, 5, 2, 0, 0,
5060 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005061
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005062 0, 0, 0, 0, 0, 0,
5063 0, 0, 0, 0, 0, 0,
5064 0, 0, 0, 0, 0, 0,
5065 0, 0, 0, 0, 0, 0,
5066 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005067
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005068 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005069
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005070 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005071
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005072 LayerTestResult<T, 3> result(outputTensorInfo);
5073 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005074
5075 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5076 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5077
5078 armnn::PadQueueDescriptor descriptor;
5079
5080 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5081 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
5082 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5083 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5084
5085 descriptor.m_Parameters.m_PadList = PadList;
5086 armnn::WorkloadInfo info;
5087
5088 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5089 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5090
5091 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5092
5093 inputHandle->Allocate();
5094 outputHandle->Allocate();
5095
5096 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
5097
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005098 workload->Execute();
5099
5100 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
5101
5102 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005103}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005104
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005105template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005106LayerTestResult<T, 4> Pad4dTestCommon(
5107 armnn::IWorkloadFactory& workloadFactory,
5108 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5109 float qScale,
5110 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005111{
5112 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5113 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5114
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005115 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
5116 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005117
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005118 std::vector<T> inputValues(
5119 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005120 {
5121 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005122 0, 1,
5123 2, 3,
5124 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005125
5126 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005127 6, 7,
5128 8, 9,
5129 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005130
5131 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005132 12, 13,
5133 14, 15,
5134 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005135
5136 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005137 18, 19,
5138 20, 21,
5139 22, 23
5140 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005141
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005142 std::vector<T> expectedOutputValues(
5143 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005144 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005145 0, 0, 0, 0,
5146 0, 0, 0, 0,
5147 0, 0, 0, 0,
5148 0, 0, 0, 0,
5149 0, 0, 0, 0,
5150 0, 0, 0, 0,
5151 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005152
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005153 0, 0, 0, 0,
5154 0, 0, 0, 0,
5155 0, 0, 0, 0,
5156 0, 0, 0, 0,
5157 0, 0, 0, 0,
5158 0, 0, 0, 0,
5159 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005160
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005161 0, 0, 0, 0,
5162 0, 0, 0, 0,
5163 0, 0, 0, 0,
5164 0, 0, 0, 0,
5165 0, 0, 0, 0,
5166 0, 0, 0, 0,
5167 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005168
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005169 0, 0, 0, 0,
5170 0, 0, 0, 0,
5171 0, 0, 0, 0,
5172 0, 0, 0, 0,
5173 0, 0, 0, 0,
5174 0, 0, 0, 0,
5175 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005176
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005177 0, 0, 0, 0,
5178 0, 0, 0, 0,
5179 0, 0, 0, 0,
5180 0, 0, 0, 0,
5181 0, 0, 0, 0,
5182 0, 0, 0, 0,
5183 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005184
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005185 0, 0, 0, 0,
5186 0, 0, 0, 0,
5187 0, 0, 0, 0,
5188 0, 0, 0, 0,
5189 0, 0, 0, 0,
5190 0, 0, 0, 0,
5191 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005192
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005193 0, 0, 0, 0,
5194 0, 0, 0, 0,
5195 0, 0, 0, 0,
5196 0, 0, 0, 0,
5197 0, 0, 0, 0,
5198 0, 0, 0, 0,
5199 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005200
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005201 0, 0, 0, 0,
5202 0, 0, 0, 0,
5203 0, 0, 0, 0,
5204 0, 0, 1, 0,
5205 0, 2, 3, 0,
5206 0, 4, 5, 0,
5207 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005208
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005209 0, 0, 0, 0,
5210 0, 0, 0, 0,
5211 0, 0, 0, 0,
5212 0, 6, 7, 0,
5213 0, 8, 9, 0,
5214 0, 10, 11, 0,
5215 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005216
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005217 0, 0, 0, 0,
5218 0, 0, 0, 0,
5219 0, 0, 0, 0,
5220 0, 0, 0, 0,
5221 0, 0, 0, 0,
5222 0, 0, 0, 0,
5223 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005224
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005225 0, 0, 0, 0,
5226 0, 0, 0, 0,
5227 0, 0, 0, 0,
5228 0, 0, 0, 0,
5229 0, 0, 0, 0,
5230 0, 0, 0, 0,
5231 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005232
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005233 0, 0, 0, 0,
5234 0, 0, 0, 0,
5235 0, 0, 0, 0,
5236 0, 0, 0, 0,
5237 0, 0, 0, 0,
5238 0, 0, 0, 0,
5239 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005240
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005241 0, 0, 0, 0,
5242 0, 0, 0, 0,
5243 0, 0, 0, 0,
5244 0, 12, 13, 0,
5245 0, 14, 15, 0,
5246 0, 16, 17, 0,
5247 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005248
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005249 0, 0, 0, 0,
5250 0, 0, 0, 0,
5251 0, 0, 0, 0,
5252 0, 18, 19, 0,
5253 0, 20, 21, 0,
5254 0, 22, 23, 0,
5255 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005256
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005257 0, 0, 0, 0,
5258 0, 0, 0, 0,
5259 0, 0, 0, 0,
5260 0, 0, 0, 0,
5261 0, 0, 0, 0,
5262 0, 0, 0, 0,
5263 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005264
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005265 0, 0, 0, 0,
5266 0, 0, 0, 0,
5267 0, 0, 0, 0,
5268 0, 0, 0, 0,
5269 0, 0, 0, 0,
5270 0, 0, 0, 0,
5271 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005272
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005273 0, 0, 0, 0,
5274 0, 0, 0, 0,
5275 0, 0, 0, 0,
5276 0, 0, 0, 0,
5277 0, 0, 0, 0,
5278 0, 0, 0, 0,
5279 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005280
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005281 0, 0, 0, 0,
5282 0, 0, 0, 0,
5283 0, 0, 0, 0,
5284 0, 0, 0, 0,
5285 0, 0, 0, 0,
5286 0, 0, 0, 0,
5287 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005288
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005289 0, 0, 0, 0,
5290 0, 0, 0, 0,
5291 0, 0, 0, 0,
5292 0, 0, 0, 0,
5293 0, 0, 0, 0,
5294 0, 0, 0, 0,
5295 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005296
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005297 0, 0, 0, 0,
5298 0, 0, 0, 0,
5299 0, 0, 0, 0,
5300 0, 0, 0, 0,
5301 0, 0, 0, 0,
5302 0, 0, 0, 0,
5303 0, 0, 0, 0
5304 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005305
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005306 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005307
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005308 LayerTestResult<T, 4> result(outputTensorInfo);
5309 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005310
5311 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5312 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5313
5314 armnn::PadQueueDescriptor descriptor;
5315
5316 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5317 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5318 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5319 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5320 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5321
5322 descriptor.m_Parameters.m_PadList = PadList;
5323 armnn::WorkloadInfo info;
5324
5325 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5326 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5327
5328 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5329
5330 inputHandle->Allocate();
5331 outputHandle->Allocate();
5332
5333 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5334
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005335 workload->Execute();
5336
5337 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5338
5339 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005340}
5341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005342LayerTestResult<uint8_t, 2> PadUint82dTest(
5343 armnn::IWorkloadFactory& workloadFactory,
5344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005345{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005346 return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005347}
5348
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005349LayerTestResult<uint8_t, 3> PadUint83dTest(
5350 armnn::IWorkloadFactory& workloadFactory,
5351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005352{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005353 return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005354}
5355
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005356LayerTestResult<uint8_t, 4> PadUint84dTest(
5357 armnn::IWorkloadFactory& workloadFactory,
5358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005359{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005360 return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005361}
5362
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005363LayerTestResult<float, 2> PadFloat322dTest(
5364 armnn::IWorkloadFactory& workloadFactory,
5365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005366{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005367 return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005368}
5369
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005370LayerTestResult<float, 3> PadFloat323dTest(
5371 armnn::IWorkloadFactory& workloadFactory,
5372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005373{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005374 return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005375}
5376
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005377LayerTestResult<float, 4> PadFloat324dTest(
5378 armnn::IWorkloadFactory& workloadFactory,
5379 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005380{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005381 return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005382}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005383
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005384LayerTestResult<float, 4> L2Normalization1dTest(
5385 armnn::IWorkloadFactory& workloadFactory,
5386 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005387 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005388{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005389 // Width: 1
5390 // Height: 1
5391 // Channels: 10
5392 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005393 unsigned int numberOfBatches = 1;
5394 unsigned int numberOfChannels = 10;
5395 unsigned int height = 1;
5396 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005397
jimfly013aab7c32018-11-12 13:32:08 +00005398
Nina Drozdd41b2592018-11-19 13:03:36 +00005399 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005400 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005401 std::vector<float> inputValues
5402 {
5403 // Batch 0, Channel 0, Height (1) x Width (1)
5404 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005405
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005406 // Batch 0, Channel 1, Height (1) x Width (1)
5407 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005408
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005409 // Batch 0, Channel 2, Height (1) x Width (1)
5410 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005411
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005412 // Batch 0, Channel 3, Height (1) x Width (1)
5413 4.0f,
5414
5415 // Batch 0, Channel 4, Height (1) x Width (1)
5416 5.0f,
5417
5418 // Batch 0, Channel 5, Height (1) x Width (1)
5419 6.0f,
5420
5421 // Batch 0, Channel 6, Height (1) x Width (1)
5422 7.0f,
5423
5424 // Batch 0, Channel 7, Height (1) x Width (1)
5425 8.0f,
5426
5427 // Batch 0, Channel 8, Height (1) x Width (1)
5428 9.0f,
5429
5430 // Batch 0, Channel 9, Height (1) x Width (1)
5431 10.0f
5432 };
telsoa014fcda012018-03-09 14:13:49 +00005433 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005434 std::vector<float> expectedOutputValues
5435 {
5436 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005437 1.0f * approxInvL2Norm,
5438 2.0f * approxInvL2Norm,
5439 3.0f * approxInvL2Norm,
5440 4.0f * approxInvL2Norm,
5441 5.0f * approxInvL2Norm,
5442 6.0f * approxInvL2Norm,
5443 7.0f * approxInvL2Norm,
5444 8.0f * approxInvL2Norm,
5445 9.0f * approxInvL2Norm,
5446 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005447 };
telsoa014fcda012018-03-09 14:13:49 +00005448
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005449
5450 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005451 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005452}
5453
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005454LayerTestResult<float, 4> L2Normalization2dTest(
5455 armnn::IWorkloadFactory& workloadFactory,
5456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005457 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005458{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005459 // Width: 5
5460 // Height: 1
5461 // Channels: 2
5462 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005463 unsigned int numberOfBatches = 1;
5464 unsigned int numberOfChannels = 2;
5465 unsigned int height = 1;
5466 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005467
Nina Drozdd41b2592018-11-19 13:03:36 +00005468 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005469 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005470 std::vector<float> inputValues
5471 {
5472 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005473 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005474
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005475 // Batch 0, Channel 1, Height (1) x Width (5)
5476 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5477 };
5478 std::vector<float> expectedOutputValues
5479 {
5480 // Batch 0, Channel 0, Height (1) x Width (5)
5481 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5482 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5483 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5484 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005485 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5486
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005487 // Batch 0, Channel 1, Height (1) x Width (5)
5488 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5489 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5490 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5491 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005492 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005493 };
telsoa014fcda012018-03-09 14:13:49 +00005494
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005495 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005496 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005497}
telsoa014fcda012018-03-09 14:13:49 +00005498
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005499LayerTestResult<float, 4> L2Normalization3dTest(
5500 armnn::IWorkloadFactory& workloadFactory,
5501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005502 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005503{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005504 // Width: 3
5505 // Height: 4
5506 // Channels: 2
5507 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005508 unsigned int numberOfBatches = 1;
5509 unsigned int numberOfChannels = 2;
5510 unsigned int height = 4;
5511 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005512
Nina Drozdd41b2592018-11-19 13:03:36 +00005513 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005514 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005515 std::vector<float> inputValues
5516 {
5517 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005518 119.0f, 21.0f, 150.0f,
5519 149.0f, 32.0f, 179.0f,
5520 15.0f, 227.0f, 141.0f,
5521 147.0f, 199.0f, 220.0f,
5522
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005523 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005524 110.0f, 140.0f, 73.0f,
5525 211.0f, 212.0f, 89.0f,
5526 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005527 162.0f, 12.0f, 161.0f
5528 };
5529 std::vector<float> expectedOutputValues
5530 {
5531 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005532 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5533 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5534 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5535 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5536 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5537 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5538 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5539 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5540 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5541 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5542 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
5543 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
5544
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005545 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005546 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5547 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5548 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5549 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5550 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5551 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5552 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5553 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5554 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5555 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5556 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005557 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
5558 };
telsoa014fcda012018-03-09 14:13:49 +00005559
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005560 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005561 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005562}
telsoa014fcda012018-03-09 14:13:49 +00005563
// Tests L2 normalization on a 4D tensor (2 batches, 3 channels, 4x3 spatial).
// Each expected value is the input value scaled by the inverse L2 norm taken
// across the channel dimension at the same (batch, y, x) position, so every
// CalcInvL2Norm argument list below gathers the three channel values for one pixel.
LayerTestResult<float, 4> L2Normalization4dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout)
{
    // Width: 3
    // Height: 4
    // Channels: 3
    // BatchSize: 2
    unsigned int numberOfBatches = 2;
    unsigned int numberOfChannels = 3;
    unsigned int height = 4;
    unsigned int width = 3;

    // GetTensorShape arranges the dimensions to match the requested data layout (NCHW/NHWC).
    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
        numberOfBatches, numberOfChannels, height, width, layout);
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f
    };
    // Each output element = input * 1/||channel vector||2 for that pixel.
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (4) x Width (3)
        235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 1, Height (4) x Width (3)
        113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 0, Channel 2, Height (4) x Width (3)
        56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
        170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
        162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
        194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
        89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
        254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
        12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
        209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
        1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
        64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
        54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),

        // Batch 1, Channel 0, Height (4) x Width (3)
        67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 1, Height (4) x Width (3)
        239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),

        // Batch 1, Channel 2, Height (4) x Width (3)
        97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
        145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
        215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
        115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
        238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
        226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
        16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
        132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
        92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
        125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
        88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
    };

    return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
                                   inputValues, expectedOutputValues, layout);
}
5708
5709template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005710LayerTestResult<T, 4> ConstantTestImpl(
5711 armnn::IWorkloadFactory& workloadFactory,
5712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00005713 float qScale,
5714 int32_t qOffset)
5715{
5716 constexpr unsigned int inputWidth = 3;
5717 constexpr unsigned int inputHeight = 4;
5718 constexpr unsigned int inputChannels = 3;
5719 constexpr unsigned int inputBatchSize = 2;
5720
5721 constexpr unsigned int outputWidth = inputWidth;
5722 constexpr unsigned int outputHeight = inputHeight;
5723 constexpr unsigned int outputChannels = inputChannels;
5724 constexpr unsigned int outputBatchSize = inputBatchSize;
5725
5726 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
5727 armnn::GetDataType<T>());
5728
5729 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
5730 armnn::GetDataType<T>());
5731
5732 // Set quantization parameters if the requested type is a quantized type.
5733 if(armnn::IsQuantizedType<T>())
5734 {
5735 inputTensorInfo.SetQuantizationScale(qScale);
5736 inputTensorInfo.SetQuantizationOffset(qOffset);
5737 outputTensorInfo.SetQuantizationScale(qScale);
5738 outputTensorInfo.SetQuantizationOffset(qOffset);
5739 }
5740
5741 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
5742 QuantizedVector<T>(qScale, qOffset, {
5743 // Batch 0, Channel 0
5744 235.0f, 46.0f, 178.0f,
5745 100.0f, 123.0f, 19.0f,
5746 172.0f, 74.0f, 250.0f,
5747 6.0f, 195.0f, 80.0f,
5748
5749 // Batch 0, Channel 1
5750 113.0f, 95.0f, 202.0f,
5751 77.0f, 114.0f, 71.0f,
5752 122.0f, 246.0f, 166.0f,
5753 82.0f, 28.0f, 37.0f,
5754
5755 // Batch 0, Channel 2
5756 56.0f, 170.0f, 162.0f,
5757 194.0f, 89.0f, 254.0f,
5758 12.0f, 209.0f, 200.0f,
5759 1.0f, 64.0f, 54.0f,
5760
5761 // Batch 1, Channel 0
5762 67.0f, 90.0f, 49.0f,
5763 7.0f, 163.0f, 18.0f,
5764 25.0f, 117.0f, 103.0f,
5765 247.0f, 59.0f, 189.0f,
5766
5767 // Batch 1, Channel 1
5768 239.0f, 104.0f, 199.0f,
5769 17.0f, 124.0f, 153.0f,
5770 222.0f, 217.0f, 75.0f,
5771 32.0f, 126.0f, 21.0f,
5772
5773 // Batch 1, Channel 2
5774 97.0f, 145.0f, 215.0f,
5775 115.0f, 116.0f, 238.0f,
5776 226.0f, 16.0f, 132.0f,
5777 92.0f, 125.0f, 88.0f,
5778 })));
5779
5780 LayerTestResult<T, 4> result(outputTensorInfo);
5781 result.outputExpected = input;
5782
5783 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5784
5785 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
5786 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
5787
5788 armnn::ConstantQueueDescriptor descriptor;
5789 descriptor.m_LayerOutput = &constantTensor;
5790
5791 armnn::WorkloadInfo info;
5792 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5793
5794 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
5795
5796 outputHandle->Allocate();
5797
5798 workload->Execute();
5799
5800 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5801 return result;
5802}
5803
// Float32 variant of the Constant-layer test; scale/offset are unused for
// non-quantized types, so 0.0f/0 are placeholders.
LayerTestResult<float, 4> ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
}
5810
// QuantisedAsymm8 variant of the Constant-layer test; scale 1.0f / offset 0
// makes the quantization an identity mapping on the test data.
LayerTestResult<uint8_t, 4> ConstantTestUint8(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
}
5817
// Tests the Merger (concatenation) layer on uint8 data: a 2-channel and a
// 1-channel 6x3 tensor are concatenated along the channel dimension into a
// 3-channel output. Window origins place input1 at channel 0 and input2 at
// channel 2. Where supported, the inputs are written directly into sub-tensors
// of the output handle instead of separate tensors.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // input2 starts at channel 2, directly after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // When sub-tensors are supported, each input handle is a view into the
    // output at its window origin, so the merge happens during the copy.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
5952
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005953LayerTestResult<uint8_t, 4> AdditionUint8Test(
5954 armnn::IWorkloadFactory& workloadFactory,
5955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005956{
5957 unsigned int batchSize = 1;
5958 unsigned int channels = 2;
5959 unsigned int height = 2;
5960 unsigned int width = 3;
5961
5962 const float scale = 7.0f;
5963 const int32_t offset = 3;
5964
5965 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
5966 armnn::TensorInfo outputTensorInfo;
5967
5968 const unsigned int shape[] = { batchSize, channels, height, width };
5969 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5970 inputTensorInfo1.SetQuantizationScale(scale);
5971 inputTensorInfo1.SetQuantizationOffset(offset);
5972
5973 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5974 inputTensorInfo2.SetQuantizationScale(scale);
5975 inputTensorInfo2.SetQuantizationOffset(offset);
5976
5977 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5978 outputTensorInfo.SetQuantizationScale(scale);
5979 outputTensorInfo.SetQuantizationOffset(offset);
5980
telsoa01c577f2c2018-08-31 09:22:23 +01005981 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005982 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5983 {
5984 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
5985 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
5986 }));
5987
telsoa01c577f2c2018-08-31 09:22:23 +01005988 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005989 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5990 {
5991 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
5992 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
5993 }));
5994
telsoa01c577f2c2018-08-31 09:22:23 +01005995 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005996 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5997 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
5998 {
5999 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
6000 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
6001 }));
6002
6003 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6004 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
6005 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6006
6007 armnn::AdditionQueueDescriptor data;
6008 armnn::WorkloadInfo info;
6009 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6010 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
6011 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6012
6013 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
6014
6015 inputHandle1->Allocate();
6016 inputHandle2->Allocate();
6017 outputHandle->Allocate();
6018
6019 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6020 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
6021
6022 workload->Execute();
6023
6024 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6025
6026 return result;
6027}
6028
surmeh01bceff2f2018-03-29 16:29:27 +01006029namespace
telsoa014fcda012018-03-09 14:13:49 +00006030{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006031LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
6032 armnn::IWorkloadFactory& workloadFactory,
6033 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6034 const unsigned int shape0[4],
6035 const std::vector<uint8_t> & values0,
6036 float scale0,
6037 int32_t offset0,
6038 const unsigned int shape1[4],
6039 const std::vector<uint8_t> & values1,
6040 float scale1,
6041 int32_t offset1,
6042 const unsigned int outShape[4],
6043 const std::vector<uint8_t> & outValues,
6044 float outScale,
6045 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01006046{
6047 armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
6048 armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
6049 armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00006050
surmeh01bceff2f2018-03-29 16:29:27 +01006051 inputTensorInfo0.SetQuantizationScale(scale0);
6052 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00006053
surmeh01bceff2f2018-03-29 16:29:27 +01006054 inputTensorInfo1.SetQuantizationScale(scale1);
6055 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00006056
surmeh01bceff2f2018-03-29 16:29:27 +01006057 outputTensorInfo.SetQuantizationScale(outScale);
6058 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00006059
surmeh01bceff2f2018-03-29 16:29:27 +01006060 auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
6061 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00006062
telsoa014fcda012018-03-09 14:13:49 +00006063 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
surmeh01bceff2f2018-03-29 16:29:27 +01006064 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00006065
surmeh01bceff2f2018-03-29 16:29:27 +01006066 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00006067 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00006068 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6069
6070 armnn::MultiplicationQueueDescriptor data;
6071 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01006072 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6073 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00006074 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6075
6076 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
6077
surmeh01bceff2f2018-03-29 16:29:27 +01006078 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00006079 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00006080 outputHandle->Allocate();
6081
surmeh01bceff2f2018-03-29 16:29:27 +01006082 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00006083 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00006084
6085 workload->Execute();
6086
6087 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6088
6089 return result;
6090}
surmeh01bceff2f2018-03-29 16:29:27 +01006091} // anonymous namespace
6092
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006093LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
6094 armnn::IWorkloadFactory& workloadFactory,
6095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006096{
6097 unsigned int batchSize = 1;
6098 unsigned int channels = 2;
6099 unsigned int height = 2;
6100 unsigned int width = 3;
6101 const unsigned int shape[] = { batchSize, channels, height, width };
6102
telsoa01c577f2c2018-08-31 09:22:23 +01006103 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006104 std::vector<uint8_t> input0({
6105 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
6106 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
6107 });
6108
telsoa01c577f2c2018-08-31 09:22:23 +01006109 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006110 std::vector<uint8_t> input1({
6111 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
6112 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
6113 });
6114
telsoa01c577f2c2018-08-31 09:22:23 +01006115 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01006116 std::vector<uint8_t> output(
6117 {
6118 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
6119 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
6120 });
6121
6122 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006123 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006124 shape,
6125 input0,
6126 4.0f,
6127 1,
6128 shape,
6129 input1,
6130 3.0f,
6131 -2,
6132 shape,
6133 output,
telsoa01c577f2c2018-08-31 09:22:23 +01006134 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01006135 -5);
6136}
6137
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006138LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
6139 armnn::IWorkloadFactory& workloadFactory,
6140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006141{
6142 const unsigned int shape0[] = { 1, 2, 2, 3 };
6143 const unsigned int shape1[] = { 1, 1, 1, 1 };
6144
6145 std::vector<uint8_t> input0({
6146 1, 2, 3, 4, 5, 6,
6147 7, 8, 9, 10, 11, 12
6148 });
6149
6150 std::vector<uint8_t> input1({2});
6151
6152 std::vector<uint8_t> output({
6153 2, 4, 6, 8, 10, 12,
6154 14, 16, 18, 20, 22, 24
6155 });
6156
6157 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006158 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006159 shape0,
6160 input0,
6161 1.0f,
6162 0,
6163 shape1,
6164 input1,
6165 1.0f,
6166 0,
6167 shape0,
6168 output,
6169 1.0f,
6170 0);
6171}
6172
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006173LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
6174 armnn::IWorkloadFactory& workloadFactory,
6175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006176{
6177 const unsigned int shape0[] = { 1, 2, 2, 3 };
6178 const unsigned int shape1[] = { 1, 1, 1, 3 };
6179
6180 std::vector<uint8_t> input0({
6181 1, 2, 3, 4, 5, 6,
6182 7, 8, 9, 10, 11, 12
6183 });
6184
6185 std::vector<uint8_t> input1({1, 2, 3});
6186
6187 std::vector<uint8_t> output({
6188 1, 4, 9, 4, 10, 18,
6189 7, 16, 27, 10, 22, 36
6190 });
6191
6192 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006193 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006194 shape0,
6195 input0,
6196 1.0f,
6197 0,
6198 shape1,
6199 input1,
6200 1.0f,
6201 0,
6202 shape0,
6203 output,
6204 1.0f,
6205 0);
6206}
telsoa014fcda012018-03-09 14:13:49 +00006207
David Beckf195f032018-09-06 16:46:34 +01006208namespace
6209{
6210template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006211LayerTestResult<T, 4> SubtractionTestHelper(
6212 armnn::IWorkloadFactory& workloadFactory,
6213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6214 const unsigned int shape0[4],
6215 const std::vector<T>& values0,
6216 float scale0,
6217 int32_t offset0,
6218 const unsigned int shape1[4],
6219 const std::vector<T> & values1,
6220 float scale1,
6221 int32_t offset1,
6222 const unsigned int outShape[4],
6223 const std::vector<T> & outValues,
6224 float outScale,
6225 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01006226{
6227 auto dataType = (std::is_same<T, uint8_t>::value ?
6228 armnn::DataType::QuantisedAsymm8 :
6229 armnn::DataType::Float32);
6230
6231 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
6232 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
6233 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
6234
6235 inputTensorInfo0.SetQuantizationScale(scale0);
6236 inputTensorInfo0.SetQuantizationOffset(offset0);
6237
6238 inputTensorInfo1.SetQuantizationScale(scale1);
6239 inputTensorInfo1.SetQuantizationOffset(offset1);
6240
6241 outputTensorInfo.SetQuantizationScale(outScale);
6242 outputTensorInfo.SetQuantizationOffset(outOffset);
6243
6244 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
6245 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
6246
6247 LayerTestResult<T, 4> result(outputTensorInfo);
6248 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
6249
6250 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
6251 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
6252 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6253
6254 armnn::SubtractionQueueDescriptor data;
6255 armnn::WorkloadInfo info;
6256 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
6257 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
6258 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
6259
6260 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
6261
6262 inputHandle0->Allocate();
6263 inputHandle1->Allocate();
6264 outputHandle->Allocate();
6265
6266 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
6267 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
6268
David Beckf195f032018-09-06 16:46:34 +01006269 workload->Execute();
6270
6271 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6272
6273 return result;
6274}
6275} // anonymous namespace
6276
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006277LayerTestResult<uint8_t, 4> SubtractionUint8Test(
6278 armnn::IWorkloadFactory& workloadFactory,
6279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006280{
6281 const unsigned int shape0[] = { 1, 1, 2, 2 };
6282 const unsigned int shape1[] = { 1, 1, 2, 2 };
6283
6284 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6285 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
6286 std::vector<uint8_t> output({ 3, 3, 5, 5 });
6287
6288 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006289 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006290 shape0, input0, 0.5f, 2,
6291 shape1, input1, 1.0f, 0,
6292 shape0, output, 1.0f, 0);
6293}
6294
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006295LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
6296 armnn::IWorkloadFactory& workloadFactory,
6297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006298{
6299 const unsigned int shape0[] = { 1, 1, 2, 2 };
6300 const unsigned int shape1[] = { 1, 1, 1, 1 };
6301
6302 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6303 std::vector<uint8_t> input1({ 2 });
6304 std::vector<uint8_t> output({ 5, 6, 7, 8 });
6305
6306 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006307 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006308 shape0, input0, 0.5f, 2,
6309 shape1, input1, 1.0f, 0,
6310 shape0, output, 1.0f, 3);
6311}
6312
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006313LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
6314 armnn::IWorkloadFactory& workloadFactory,
6315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006316{
6317 const unsigned int shape0[] = { 1, 1, 2, 2 };
6318 const unsigned int shape1[] = { 1, 1, 2, 1 };
6319
6320 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6321 std::vector<uint8_t> input1({ 2, 1 });
6322 std::vector<uint8_t> output({ 8, 11, 12, 15 });
6323
6324 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006325 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006326 shape0, input0, 1.0f, 0,
6327 shape1, input1, 1.0f, 0,
6328 shape0, output, 1.0f, 0);
6329}
6330
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006331LayerTestResult<float, 4> SubtractionTest(
6332 armnn::IWorkloadFactory& workloadFactory,
6333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006334{
6335 const unsigned int shape0[] = { 1, 1, 2, 2 };
6336 const unsigned int shape1[] = { 1, 1, 2, 2 };
6337
6338 std::vector<float> input0({ 1, 2, 3, 4 });
6339 std::vector<float> input1({ 1, -1, 0, 2 });
6340 std::vector<float> output({ 0, 3, 3, 2 });
6341
6342 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006343 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006344 shape0, input0, 1.0f, 0,
6345 shape1, input1, 1.0f, 0,
6346 shape0, output, 1.0f, 0);
6347}
6348
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006349LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
6350 armnn::IWorkloadFactory& workloadFactory,
6351 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006352{
6353 const unsigned int shape0[] = { 1, 1, 2, 2 };
6354 const unsigned int shape1[] = { 1, 1, 1, 1 };
6355
6356 std::vector<float> input0({ 1, 2, 3, 4 });
6357 std::vector<float> input1({ 10 });
6358 std::vector<float> output({ -9, -8, -7, -6 });
6359
6360 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006361 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006362 shape0, input0, 1.0f, 0,
6363 shape1, input1, 1.0f, 0,
6364 shape0, output, 1.0f, 0);
6365}
6366
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006367LayerTestResult<float, 4> SubtractionBroadcastTest(
6368 armnn::IWorkloadFactory& workloadFactory,
6369 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006370{
6371 const unsigned int shape0[] = { 1, 1, 2, 2 };
6372 const unsigned int shape1[] = { 1, 1, 1, 2 };
6373
6374 std::vector<float> input0({ 1, 2, 3, 4 });
6375 std::vector<float> input1({ 10, -5 });
6376 std::vector<float> output({ -9, 7, -7, 9 });
6377
6378 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006379 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006380 shape0, input0, 1.0f, 0,
6381 shape1, input1, 1.0f, 0,
6382 shape0, output, 1.0f, 0);
6383}
6384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006385LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
6386 armnn::IWorkloadFactory& workloadFactory,
6387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006388{
6389 constexpr unsigned int inputWidth = 4;
6390 constexpr unsigned int inputHeight = 4;
6391 constexpr unsigned int inputChannels = 1;
6392 constexpr unsigned int inputBatchSize = 1;
6393
6394 constexpr unsigned int outputWidth = inputWidth;
6395 constexpr unsigned int outputHeight = inputHeight;
6396 constexpr unsigned int outputChannels = inputChannels;
6397 constexpr unsigned int outputBatchSize = inputBatchSize;
6398
6399 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6400 armnn::DataType::QuantisedAsymm8);
6401 inputTensorInfo.SetQuantizationScale(1.5f);
6402 inputTensorInfo.SetQuantizationOffset(-3);
6403
6404 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6405 armnn::DataType::QuantisedAsymm8);
6406 outputTensorInfo.SetQuantizationScale(1.5f);
6407 outputTensorInfo.SetQuantizationOffset(-3);
6408
6409 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6410 1, 2, 3, 4,
6411 2, 3, 4, 5,
6412 3, 4, 5, 6,
6413 4, 5, 6, 7
6414 }));
6415
6416 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6417 result.outputExpected = input;
6418
6419 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6420 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6421
6422 armnn::ResizeBilinearQueueDescriptor descriptor;
6423 armnn::WorkloadInfo info;
6424 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6425 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6426
6427 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6428
6429 inputHandle->Allocate();
6430 outputHandle->Allocate();
6431 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6432
6433 workload->Execute();
6434
6435 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6436 return result;
6437}
6438
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006439LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
6440 armnn::IWorkloadFactory& workloadFactory,
6441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006442{
6443 constexpr unsigned int inputWidth = 2;
6444 constexpr unsigned int inputHeight = 2;
6445 constexpr unsigned int inputChannels = 1;
6446 constexpr unsigned int inputBatchSize = 1;
6447
6448 constexpr unsigned int outputWidth = inputWidth / 2;
6449 constexpr unsigned int outputHeight = inputHeight / 2;
6450 constexpr unsigned int outputChannels = inputChannels;
6451 constexpr unsigned int outputBatchSize = inputBatchSize;
6452
6453 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6454 armnn::DataType::QuantisedAsymm8);
6455 inputTensorInfo.SetQuantizationScale(0.1567f);
6456 inputTensorInfo.SetQuantizationOffset(1);
6457
6458 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6459 armnn::DataType::QuantisedAsymm8);
6460 outputTensorInfo.SetQuantizationScale(0.1567f);
6461 outputTensorInfo.SetQuantizationOffset(1);
6462
6463 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6464 1, 255,
6465 200, 250
6466 }));
6467
6468 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
6469 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01006470 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00006471 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
6472 // the centre).
6473 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6474 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6475 1
6476 }));
6477
6478 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6479 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6480
6481 armnn::ResizeBilinearQueueDescriptor descriptor;
6482 armnn::WorkloadInfo info;
6483 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6484 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6485
6486 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6487
6488 inputHandle->Allocate();
6489 outputHandle->Allocate();
6490 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6491
6492 workload->Execute();
6493
6494 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6495 return result;
6496}
6497
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006498LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
6499 armnn::IWorkloadFactory& workloadFactory,
6500 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006501{
6502 constexpr unsigned int inputWidth = 4;
6503 constexpr unsigned int inputHeight = 4;
6504 constexpr unsigned int inputChannels = 1;
6505 constexpr unsigned int inputBatchSize = 1;
6506
6507 constexpr unsigned int outputWidth = inputWidth / 2;
6508 constexpr unsigned int outputHeight = inputHeight / 2;
6509 constexpr unsigned int outputChannels = inputChannels;
6510 constexpr unsigned int outputBatchSize = inputBatchSize;
6511
6512 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6513 armnn::DataType::QuantisedAsymm8);
6514 inputTensorInfo.SetQuantizationScale(3.141592f);
6515 inputTensorInfo.SetQuantizationOffset(3);
6516
6517 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6518 armnn::DataType::QuantisedAsymm8);
6519 outputTensorInfo.SetQuantizationScale(3.141592f);
6520 outputTensorInfo.SetQuantizationOffset(3);
6521
6522 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6523 1, 2, 3, 4,
6524 2, 3, 4, 5,
6525 3, 4, 5, 6,
6526 4, 5, 6, 7
6527 }));
6528
6529 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6530 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6531 1, 3,
6532 3, 5
6533 }));
6534
6535 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6536 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6537
6538 armnn::ResizeBilinearQueueDescriptor descriptor;
6539 armnn::WorkloadInfo info;
6540 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6541 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6542
6543 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6544
6545 inputHandle->Allocate();
6546 outputHandle->Allocate();
6547 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6548
6549 workload->Execute();
6550
6551 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6552 return result;
6553}
6554
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006555LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
6556 armnn::IWorkloadFactory& workloadFactory,
6557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006558{
6559 constexpr unsigned int inputWidth = 3;
6560 constexpr unsigned int inputHeight = 2;
6561 constexpr unsigned int inputChannels = 1;
6562 constexpr unsigned int inputBatchSize = 1;
6563
6564 constexpr unsigned int outputWidth = 2;
6565 constexpr unsigned int outputHeight = 1;
6566 constexpr unsigned int outputChannels = inputChannels;
6567 constexpr unsigned int outputBatchSize = inputBatchSize;
6568
6569 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6570 armnn::DataType::QuantisedAsymm8);
6571 inputTensorInfo.SetQuantizationScale(1.5f);
6572 inputTensorInfo.SetQuantizationOffset(-1);
6573
6574 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6575 armnn::DataType::QuantisedAsymm8);
6576 outputTensorInfo.SetQuantizationScale(1.5f);
6577 outputTensorInfo.SetQuantizationOffset(-1);
6578
6579 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6580 1, 2, 3, // 3.0, 4.5, 6.0
6581 5, 8, 13 // 9.0, 13.5, 21.0
6582 }));
6583
6584 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6585 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6586 1, 3 // 3.0, 5.25
6587 }));
6588
6589 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6590 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6591
6592 armnn::ResizeBilinearQueueDescriptor descriptor;
6593 armnn::WorkloadInfo info;
6594 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6595 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6596
6597 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6598
6599 inputHandle->Allocate();
6600 outputHandle->Allocate();
6601
6602 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6603
6604 workload->Execute();
6605
6606 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6607 return result;
6608}
6609
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006610LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
6611 armnn::IWorkloadFactory& workloadFactory,
6612 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006613{
6614 constexpr unsigned int inputWidth = 2;
6615 constexpr unsigned int inputHeight = 3;
6616 constexpr unsigned int inputChannels = 1;
6617 constexpr unsigned int inputBatchSize = 1;
6618
6619 constexpr unsigned int outputWidth = 5;
6620 constexpr unsigned int outputHeight = 3;
6621 constexpr unsigned int outputChannels = inputChannels;
6622 constexpr unsigned int outputBatchSize = inputBatchSize;
6623
6624 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6625 armnn::DataType::QuantisedAsymm8);
6626 inputTensorInfo.SetQuantizationScale(0.010765f);
6627 inputTensorInfo.SetQuantizationOffset(7);
6628
6629 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6630 armnn::DataType::QuantisedAsymm8);
6631 outputTensorInfo.SetQuantizationScale(0.010132f);
6632 outputTensorInfo.SetQuantizationOffset(-18);
6633
6634 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6635 24, 228, // 0.183005, 2.379065,
6636 105, 128, // 1.05497, 1.302565
6637 230, 71 // 2.400595, 0.68896
6638 }));
6639
6640 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6641 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6642 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
6643 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
6644 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
6645 }));
6646
6647 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6648 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6649
6650 armnn::ResizeBilinearQueueDescriptor descriptor;
6651 armnn::WorkloadInfo info;
6652 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6653 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6654
6655 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6656
6657 inputHandle->Allocate();
6658 outputHandle->Allocate();
6659 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6660
6661 workload->Execute();
6662
6663 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6664 return result;
6665}
6666
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006667LayerTestResult<float, 4> BatchNormTest(
6668 armnn::IWorkloadFactory& workloadFactory,
6669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006670{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006671 // BatchSize: 1
6672 // Channels: 2
6673 // Height: 3
6674 // Width: 2
6675
6676 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
6677 std::vector<float> inputValues
6678 {
6679 // Batch 0, Channel 0, Height (3) x Width (2)
6680 1.f, 4.f,
6681 4.f, 2.f,
6682 1.f, 6.f,
6683
6684 // Batch 0, Channel 1, Height (3) x Width (2)
6685 1.f, 1.f,
6686 4.f, 1.f,
6687 -2.f, 4.f
6688 };
6689 std::vector<float> expectedOutputValues
6690 {
6691 // Batch 0, Channel 0, Height (3) x Width (2)
6692 1.f, 4.f,
6693 4.f, 2.f,
6694 1.f, 6.f,
6695
6696 // Batch 0, Channel 1, Height (3) x Width (2)
6697 3.f, 3.f,
6698 4.f, 3.f,
6699 2.f, 4.f
6700 };
6701
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006702 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
6703 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006704 0.f, 0, armnn::DataLayout::NCHW);
6705}
6706
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006707LayerTestResult<float, 4> BatchNormNhwcTest(
6708 armnn::IWorkloadFactory& workloadFactory,
6709 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006710{
6711 // BatchSize: 1
6712 // Height: 3
6713 // Width: 2
6714 // Channels: 2
6715
6716 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
6717 std::vector<float> inputValues
6718 {
6719 // Batch 0, Height 0, Width (2) x Channel (2)
6720 1.f, 1.f,
6721 4.f, 1.f,
6722
6723 // Batch 0, Height 1, Width (2) x Channel (2)
6724 4.f, 4.f,
6725 2.f, 1.f,
6726
6727 // Batch 0, Height 2, Width (2) x Channel (2)
6728 1.f, -2.f,
6729 6.f, 4.f
6730 };
6731 std::vector<float> expectedOutputValues
6732 {
6733 // Batch 0, Height 0, Width (2) x Channel (2)
6734 1.f, 3.f,
6735 4.f, 3.f,
6736
6737 // Batch 0, Height 1, Width (2) x Channel (2)
6738 4.f, 4.f,
6739 2.f, 3.f,
6740
6741 // Batch 0, Height 2, Width (2) x Channel (2)
6742 1.f, 2.f,
6743 6.f, 4.f
6744 };
6745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006746 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
6747 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006748 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00006749}
6750
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006751LayerTestResult<uint8_t, 4> BatchNormUint8Test(
6752 armnn::IWorkloadFactory& workloadFactory,
6753 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006754{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006755 // BatchSize: 1
6756 // Channels: 2
6757 // Height: 3
6758 // Width: 2
6759
6760 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
6761 std::vector<float> inputValues
6762 {
6763 // Batch 0, Channel 0, Height (3) x Width (2)
6764 1.f, 4.f,
6765 4.f, 2.f,
6766 1.f, 6.f,
6767
6768 // Batch 0, Channel 1, Height (3) x Width (2)
6769 1.f, 1.f,
6770 4.f, 1.f,
6771 -2.f, 4.f
6772 };
6773 std::vector<float> expectedOutputValues
6774 {
6775 // Batch 0, Channel 0, Height (3) x Width (2)
6776 1.f, 4.f,
6777 4.f, 2.f,
6778 1.f, 6.f,
6779
6780 // Batch 0, Channel 1, Height (3) x Width (2)
6781 3.f, 3.f,
6782 4.f, 3.f,
6783 2.f, 4.f
6784 };
6785
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006786 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
6787 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006788 1.f/20.f, 50, armnn::DataLayout::NCHW);
6789}
6790
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006791LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6792 armnn::IWorkloadFactory& workloadFactory,
6793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006794{
6795 // BatchSize: 1
6796 // Height: 3
6797 // Width: 2
6798 // Channels: 2
6799
6800 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
6801 std::vector<float> inputValues
6802 {
6803 // Batch 0, Height 0, Width (2) x Channel (2)
6804 1.f, 1.f,
6805 4.f, 1.f,
6806
6807 // Batch 0, Height 1, Width (2) x Channel (2)
6808 4.f, 4.f,
6809 2.f, 1.f,
6810
6811 // Batch 0, Height 2, Width (2) x Channel (2)
6812 1.f, -2.f,
6813 6.f, 4.f
6814 };
6815 std::vector<float> expectedOutputValues
6816 {
6817 // Batch 0, Height 0, Width (2) x Channel (2)
6818 1.f, 3.f,
6819 4.f, 3.f,
6820
6821 // Batch 0, Height 1, Width (2) x Channel (2)
6822 4.f, 4.f,
6823 2.f, 3.f,
6824
6825 // Batch 0, Height 2, Width (2) x Channel (2)
6826 1.f, 2.f,
6827 6.f, 4.f
6828 };
6829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006830 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
6831 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006832 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00006833}
6834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006835LayerTestResult<uint8_t, 4> ConstantUint8Test(
6836 armnn::IWorkloadFactory& workloadFactory,
6837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006838{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006839 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00006840}
6841
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006842LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
6843 armnn::IWorkloadFactory& workloadFactory,
6844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006845{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006846 return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006847}
6848
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006849LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
6850 armnn::IWorkloadFactory& workloadFactory,
6851 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006852{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006853 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006854}
6855
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006856LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
6857 armnn::IWorkloadFactory& workloadFactory,
6858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006859{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006860 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006861}
6862
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006863LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
6864 armnn::IWorkloadFactory& workloadFactory,
6865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006866{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006867 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006868}
6869
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006870LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
6871 armnn::IWorkloadFactory& workloadFactory,
6872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006873{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006874 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006875}
6876
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006877LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
6878 armnn::IWorkloadFactory& workloadFactory,
6879 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006880{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006881 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006882}
6883
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006884LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
6885 armnn::IWorkloadFactory& workloadFactory,
6886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006887{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006888 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006889}
6890
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006891LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
6892 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006893 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6894 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00006895{
narpra015cdda352018-11-19 15:30:27 +00006896 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006897}
6898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006899LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
6900 armnn::IWorkloadFactory& workloadFactory,
6901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006902{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006903 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006904}
6905
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006906LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
6907 armnn::IWorkloadFactory& workloadFactory,
6908 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006909{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006910 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006911}
6912
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006913LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
6914 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006915 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6916 bool useSubtensor)
6917{
6918 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
6919}
6920
6921LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
6922 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006924{
narpra015cdda352018-11-19 15:30:27 +00006925 return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6926}
6927
6928LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
6929 armnn::IWorkloadFactory& workloadFactory,
6930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6931{
6932 return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6933}
6934
6935LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
6936 armnn::IWorkloadFactory& workloadFactory,
6937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6938{
6939 return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6940}
6941
6942LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
6943 armnn::IWorkloadFactory& workloadFactory,
6944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
6945{
6946 return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
6947}
6948
6949LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
6950 armnn::IWorkloadFactory& workloadFactory,
6951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6952{
6953 return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6954}
6955
6956LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
6957 armnn::IWorkloadFactory& workloadFactory,
6958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6959{
6960 return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6961}
6962
6963LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
6964 armnn::IWorkloadFactory& workloadFactory,
6965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6966{
6967 return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6968}
6969
6970LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
6971 armnn::IWorkloadFactory& workloadFactory,
6972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6973 bool useSubtensor)
6974{
6975 return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006976}
6977
// Max pooling with a 2x2 kernel and 2x2 stride, Float32 variant.
// forceNoPadding is forwarded unchanged to the common implementation.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
6985
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006986LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
6987 armnn::IWorkloadFactory& workloadFactory,
6988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6989 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006990{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006991 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
6992 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00006993}
6994
// Max pooling with a 3x3 kernel and 2x4 stride, Float32 variant.
// forceNoPadding is forwarded unchanged to the common implementation.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
7002
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007003LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
7004 armnn::IWorkloadFactory& workloadFactory,
7005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7006 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00007007{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007008 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
7009 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007010}
7011
// Simple max pooling, Float32 variant.
// dataLayout (NCHW/NHWC) is forwarded unchanged to the common implementation.
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
7019
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007020LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
7021 armnn::IWorkloadFactory& workloadFactory,
7022 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007023 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01007024{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007025 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01007026}
7027
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007028LayerTestResult<float, 4> SimpleAveragePooling2dTest(
7029 armnn::IWorkloadFactory& workloadFactory,
7030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007031 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00007032{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007033 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01007034}
7035
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007036LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
7037 armnn::IWorkloadFactory& workloadFactory,
7038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00007039 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01007040{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007041 return SimpleAveragePooling2dTestCommon<uint8_t>(
7042 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00007043}
7044
// Average pooling, 3x2 kernel / 2x2 stride, padding excluded from the average.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
        workloadFactory, memoryManager, forceNoPadding);
}

// Average pooling over large tensors, float32.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
}

// Uint8 variant of the large-tensor average pooling test
// (presumably quantization scale 0.5, offset -1).
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
}
7067
// --- L2 pooling dispatchers -------------------------------------------------
// Each wrapper instantiates the corresponding templated TestCommon helper for
// float32 or uint8; kernel size / stride are encoded in the function name.

// Simple L2 pooling, layout-parameterised, float32.
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}

// Simple L2 pooling, layout-parameterised, uint8.
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
}

// L2 pooling, 3x3 kernel / stride 1.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling, 3x3 kernel / stride 3.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling, 3x3 kernel / stride 4.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling, 7x7 kernel.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling, 9x9 kernel.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
}
7153
// Pooling with a non-square kernel and asymmetric padding, float32.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
}

// Uint8 variant of the asymmetric non-square pooling test.
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
}
7167
// Runs the given pooling algorithm on the backend under test and on
// refWorkloadFactory (the reference backend), comparing the two outputs.
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<float>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

// Uint8 variant of the comparison test
// (presumably quantization scale 0.1, offset 128).
LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<uint8_t>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
7187
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007188LayerTestResult<float, 2> FullyConnectedLargeTest(
7189 armnn::IWorkloadFactory& workloadFactory,
7190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7191 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00007192{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007193 return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00007194}
7195
// --- "IgnorePadding" pooling dispatchers ------------------------------------
// These exercise PaddingMethod::IgnoreValue behaviour; each wrapper forwards
// to the matching templated TestCommon helper. For the uint8 variants the
// extra trailing arguments are presumably quantization scale/offset - confirm
// against Pooling2dTestImpl.hpp.

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
}

// Max pooling with a 3x3 kernel.
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
}

// Average pooling.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
}

// Average pooling with no padding configured.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
}

// Average pooling with a 3x3 kernel.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling.
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
}

// L2 pooling with a 3x3 kernel.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
}
7293
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007294LayerTestResult<float, 4> SimplePermuteFloat32Test(
7295 armnn::IWorkloadFactory& workloadFactory,
7296 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007297{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007298 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007299};
7300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007301LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
7302 armnn::IWorkloadFactory& workloadFactory,
7303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007304{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007305 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007306};
surmeh01bceff2f2018-03-29 16:29:27 +01007307
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007308LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
7309 armnn::IWorkloadFactory& workloadFactory,
7310 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007311{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007312 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007313};
7314
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007315LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
7316 armnn::IWorkloadFactory& workloadFactory,
7317 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007318{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007319 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007320};
7321
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007322LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
7323 armnn::IWorkloadFactory& workloadFactory,
7324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007325{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007326 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01007327};
7328
7329namespace
7330{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007331
narpra011e4c31d2018-09-28 11:07:51 +01007332template <typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007333LayerTestResult<T, OutputDim> MeanTestHelper(
7334 armnn::IWorkloadFactory& workloadFactory,
7335 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7336 const unsigned int* inputShape,
7337 const std::vector<T>& inputData,
7338 const std::vector<unsigned int>& axis,
7339 bool keepDims,
7340 const unsigned int* outputShape,
7341 const std::vector<T>& outputData,
7342 float scale = 1.0f,
7343 int32_t offset = 0)
narpra011e4c31d2018-09-28 11:07:51 +01007344{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007345 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
narpra011e4c31d2018-09-28 11:07:51 +01007346
7347 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
7348 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
7349
7350 inputTensorInfo.SetQuantizationScale(scale);
7351 inputTensorInfo.SetQuantizationOffset(offset);
7352
7353 outputTensorInfo.SetQuantizationScale(scale);
7354 outputTensorInfo.SetQuantizationOffset(offset);
7355
7356 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
7357
7358 LayerTestResult<T, OutputDim> result(outputTensorInfo);
7359 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
7360
7361 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7362 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7363
7364 armnn::MeanQueueDescriptor data;
7365 data.m_Parameters.m_Axis = axis;
7366 data.m_Parameters.m_KeepDims = keepDims;
7367 armnn::WorkloadInfo info;
7368 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
7369 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7370
7371 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
7372
7373 inputHandle->Allocate();
7374 outputHandle->Allocate();
7375
7376 CopyDataToITensorHandle(inputHandle.get(), input.origin());
7377
narpra011e4c31d2018-09-28 11:07:51 +01007378 workload->Execute();
7379
7380 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
7381
7382 return result;
7383}
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007384
narpra011e4c31d2018-09-28 11:07:51 +01007385} // anonymous namespace
7386
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007387LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
7388 armnn::IWorkloadFactory& workloadFactory,
7389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007390{
7391 const unsigned int inputShape[] = { 3, 2 };
7392 const unsigned int outputShape[] = { 1 };
7393
7394 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7395 std::vector<uint8_t> output({ 2 });
7396
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007397 return MeanTestHelper<uint8_t, 2, 1>(
7398 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007399}
7400
// Mean over axis 2 of a 1x1x3x2 uint8 tensor without keeping the reduced
// dimension: output rank drops from 4 to 3.
LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 3>(
        workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
}

// Same reduction as above but with keepDims=true: reduced axis stays as
// extent 1, so input and output ranks are both 4.
LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
    std::vector<uint8_t> output({ 2, 2 });

    return MeanTestHelper<uint8_t, 4, 4>(
        workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
}

// Mean over two axes (0 and 3) simultaneously, keeping dimensions.
LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
    // Note: true means are 1.5/3.5/5.5 - integer results reflect uint8
    // rounding behaviour of the Mean implementation.
    std::vector<uint8_t> output({ 1, 3, 5 });

    return MeanTestHelper<uint8_t, 4, 4>(
        workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
}
7442
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007443LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
7444 armnn::IWorkloadFactory& workloadFactory,
7445 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007446{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007447 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007448 const unsigned int outputShape[] = { 2 };
7449
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007450 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
7451 24 });
7452 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01007453
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007454 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
7455 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007456 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01007457}
7458
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007459LayerTestResult<float, 1> MeanFloatSimpleTest(
7460 armnn::IWorkloadFactory& workloadFactory,
7461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007462{
7463 const unsigned int inputShape[] = { 3, 2 };
7464 const unsigned int outputShape[] = { 1 };
7465
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007466 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7467 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007468
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007469 return MeanTestHelper<float, 2, 1>(
7470 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007471}
7472
// Mean over axis 0 of a 2x3x1x2 float tensor without keeping the reduced
// dimension; the two identical halves average to themselves.
LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 3, 1, 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });

    return MeanTestHelper<float, 4, 3>(
        workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
}

// Mean over axis 2 with keepDims=true: output keeps rank 4 with extent 1
// on the reduced axis.
LayerTestResult<float, 4> MeanFloatKeepDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 1, 1, 3, 2 };
    const unsigned int outputShape[] = { 1, 1, 1, 2 };

    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
    std::vector<float> output({ 2.0f, 2.0f });

    return MeanTestHelper<float, 4, 4>(
        workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
}

// Mean over two axes (0 and 3) simultaneously, keeping dimensions.
LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 2, 3, 1, 2 };
    const unsigned int outputShape[] = { 1, 3, 1, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
    std::vector<float> output({ 1.5f, 3.5f, 5.5f });

    return MeanTestHelper<float, 4, 4>(
        workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
}
7514
// Android NN VTS-derived Mean test: reduce a 4x3x2 float tensor over
// axes {0, 1} down to a rank-1 tensor.
LayerTestResult<float, 1> MeanVtsFloat1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 2 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 12.0f, 13.0f });

    return MeanTestHelper<float, 3, 1>(
        workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
}

// VTS-derived Mean test: reduce the same 4x3x2 tensor over axes {0, 2}
// with keepDims=true.
LayerTestResult<float, 3> MeanVtsFloat2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = { 4, 3, 2 };
    const unsigned int outputShape[] = { 1, 3, 1 };

    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
    std::vector<float> output({ 10.5f, 12.5f, 14.5f });

    return MeanTestHelper<float, 3, 3>(
        workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
}
7544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007545LayerTestResult<float, 3> MeanVtsFloat3Test(
7546 armnn::IWorkloadFactory& workloadFactory,
7547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007548{
7549 const unsigned int inputShape[] = { 1, 2, 2, 1 };
7550 const unsigned int outputShape[] = { 1, 2, 1 };
7551
7552 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
7553 std::vector<float> output({ 1.5f, 3.5f });
7554
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007555 return MeanTestHelper<float, 4, 3>(
7556 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007557}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007558
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007559LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
7560 armnn::IWorkloadFactory& workloadFactory,
7561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007562{
7563 // Create Initial Tensor
7564 // 1, 2, 3
7565 // 4, 5, 6
7566 // 7, 8, 9
7567
7568 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
7569 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
7570
7571 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
7572 {1, 2, 3,
7573 4, 5, 6,
7574 7, 8, 9
7575 });
7576
7577 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
7578 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
7579 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
7580 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
7581
7582 // Apply MaxPool poolSize = 1x1, stride=2x2
7583 // Result =
7584 // 1, 3
7585 // 7, 9
7586 armnn::Pooling2dDescriptor descriptor;
7587 descriptor.m_PoolHeight = 1;
7588 descriptor.m_PoolWidth = 1;
7589 descriptor.m_StrideX = 2;
7590 descriptor.m_StrideY = 2;
7591 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
7592
7593 armnn::Pooling2dQueueDescriptor queueDescriptor;
7594 queueDescriptor.m_Parameters = descriptor;
7595 armnn::WorkloadInfo workloadInfo;
7596 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
7597 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
7598
7599 // Create the MaxPool
7600 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
7601
7602 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
7603 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
7604 boost::multi_array<float, 4> resultMaxPool;
7605 resultMaxPool.resize(shape);
7606
7607
7608 // Create addition with another tensor the same size
7609 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
7610 // with the initial tensor.
7611 // 12, 16
7612 // 24, 28
7613
7614 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
7615 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
7616
7617 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
7618 {12, 16,
7619 24, 28,
7620 });
7621
7622 // Expected output tensor after MaxPool and Addition.
7623 LayerTestResult<float,4> addRet(addOutputTensorInfo);
7624 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
7625 {
7626 13, 19,
7627 31, 37
7628 }));
7629
7630 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
7631 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
7632
7633 armnn::AdditionQueueDescriptor data;
7634 armnn::WorkloadInfo info;
7635
7636 // Add the output of the MaxPool and the new tensor
7637 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
7638 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
7639 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
7640
7641 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
7642
7643 poolingInputHandle->Allocate();
7644 poolingOutputHandle->Allocate();
7645 addInputHandle->Allocate();
7646 addOutputHandle->Allocate();
7647
7648 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
7649 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
7650
7651 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
7652 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
7653
7654 workload->Execute();
7655 addWorkload->Execute();
7656
7657 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
7658
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007659 return addRet;
7660}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007661
//
// SpaceToBatchNd test dispatchers. Each function forwards to the templated
// implementation in SpaceToBatchNdTestImpl.hpp, instantiated for the element
// type (float / uint8_t); the *NHWC* variants call the NHWC-specific helpers.
//

LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007773
namespace {

// Generic driver for the BatchToSpaceNd layer tests.
//
// Builds input/output TensorInfos from the raw shape arrays (the data type is
// derived from T: uint8_t selects QuantisedAsymm8, anything else Float32),
// creates a BatchToSpaceNd workload with the given data layout, block shape
// and crops, runs it once, and returns the actual output alongside the
// expected output for comparison by the caller.
//
// scale/offset set identical quantization parameters on both input and
// output tensors (only meaningful for the uint8_t instantiation; the
// defaults leave float tests unaffected).
//
// NOTE(review): the final copy-out indexes result.output with four
// subscripts, so this helper is only valid for OutputDim == 4 — all current
// call sites instantiate it that way; confirm before adding other ranks.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
 {
    // uint8_t tests run quantized; everything else runs as float32.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the BatchToSpaceNd operation and bind the tensor handles.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
7832
// BatchToSpaceNd, NHWC layout: block shape {2, 2}, no cropping.
// Four 2x2x1 batches are interleaved back into a single 4x4x1 image.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1 };

    std::vector<float> input
    ({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // The four batches interleave into one 4x4 plane holding 1..16 in order.
    std::vector<float> expectedOutput
    ({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
7880
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007881LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
7882 armnn::IWorkloadFactory& workloadFactory,
7883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007884{
7885 const unsigned int inputShape[] = {4, 1, 1, 1};
7886 const unsigned int outputShape[] = {1, 2, 2, 1};
7887
7888 std::vector<float> input
7889 ({
7890 // Batch 0, Height 0, Width (2) x Channel (1)
7891 1.0f, 2.0f, 3.0f, 4.0f
7892 });
7893
7894 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
7895
7896 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007897 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007899 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7900 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7901 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007902}
7903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007904LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
7905 armnn::IWorkloadFactory& workloadFactory,
7906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007907{
7908 const unsigned int inputShape[] = {4, 1, 1, 3};
7909 const unsigned int outputShape[] = {1, 2, 2, 3};
7910
7911 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7912
7913 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7914
7915 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007916 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007918 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7919 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7920 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007921}
7922
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007923LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
7924 armnn::IWorkloadFactory &workloadFactory,
7925 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007926{
7927 const unsigned int inputShape[] = {4, 3, 1, 1};
7928 const unsigned int outputShape[] = {1, 3, 2, 2};
7929
7930 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7931
7932 std::vector<float> expectedOutput
7933 ({
7934 // Batch 0, Channel 0, Height (2) x Width (2)
7935 1.0f, 4.0f,
7936 7.0f, 10.0f,
7937
7938 // Batch 0, Channel 1, Height (2) x Width (2)
7939 2.0f, 5.0f,
7940 8.0f, 11.0f,
7941
7942 // Batch 0, Channel 2, Height (2) x Width (2)
7943 3.0f, 6.0f,
7944 9.0f, 12.0f,
7945 });
7946
7947 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007948 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007950 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7951 armnn::DataLayout::NCHW, inputShape, input, blockShape,
7952 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007953}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007954
Mike Kelly831faed2018-11-28 11:52:08 +00007955LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
7956 armnn::IWorkloadFactory& workloadFactory,
7957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7958{
7959 const unsigned int inputShape[] = {4, 1, 1, 1};
7960 const unsigned int outputShape[] = {1, 1, 2, 2};
7961
7962 std::vector<float> input
7963 ({
7964 // Batch 0, Height 0, Width (2) x Channel (1)
7965 1.0f, 2.0f, 3.0f, 4.0f
7966 });
7967
7968 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
7969
7970 std::vector<unsigned int> blockShape({2, 2});
7971 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7972
7973 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7974 armnn::DataLayout::NCHW, inputShape, input, blockShape,
7975 crops, outputShape, expectedOutput);
7976}
7977
7978LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
7979 armnn::IWorkloadFactory& workloadFactory,
7980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7981{
7982 const unsigned int inputShape[] = {4, 3, 1, 1};
7983 const unsigned int outputShape[] = {1, 3, 2, 2};
7984
7985 std::vector<float> input({ 1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f });
7986
7987 std::vector<float> expectedOutput
7988 ({
7989 // Batch 0, Channel 0, Height (2) x Width (2)
7990 1.0f, 7.0f,
7991 2.0f, 8.0f,
7992
7993 // Batch 0, Channel 1, Height (2) x Width (2)
7994 3.0f, 9.0f,
7995 4.0f, 10.0f,
7996
7997 // Batch 0, Channel 2, Height (2) x Width (2)
7998 5.0f, 11.0f,
7999 6.0f, 12.0f,
8000 });
8001
8002 std::vector<unsigned int> blockShape({2, 2});
8003 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8004
8005 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
8006 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8007 crops, outputShape, expectedOutput);
8008}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008009
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008010LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
8011 armnn::IWorkloadFactory& workloadFactory,
8012 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00008013{
8014 const unsigned int inputShape[] = {4, 2, 2, 1};
8015 const unsigned int outputShape[] = {1, 4, 4, 1};
8016
8017 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 });
8018 std::vector<uint8_t> expectedOutput({ 1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
8019
8020 std::vector<unsigned int> blockShape({2, 2});
8021 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8022
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00008023 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
8024 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008025}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00008026
//
// StridedSlice test dispatchers. Each function forwards to the templated
// implementation in StridedSliceTestImpl.hpp, instantiated for the element
// type (float / uint8_t); the result rank matches the named slice scenario.
//

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
}
Mike Kelly831faed2018-11-28 11:52:08 +00008152LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
8153 armnn::IWorkloadFactory& workloadFactory,
8154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8155{
8156 const unsigned int inputShape[] = {4, 1, 1, 1};
8157 const unsigned int outputShape[] = {1, 2, 2, 1};
8158
8159 std::vector<uint8_t> input
8160 ({
8161 // Batch 0, Height 0, Width (2) x Channel (1)
8162 1, 2, 3, 4
8163 });
8164
8165 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8166
8167 std::vector<unsigned int> blockShape({2, 2});
8168 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8169
8170 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8171 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8172 crops, outputShape, expectedOutput);
8173}
8174
8175LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
8176 armnn::IWorkloadFactory& workloadFactory,
8177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8178{
8179 const unsigned int inputShape[] = {4, 1, 1, 3};
8180 const unsigned int outputShape[] = {1, 2, 2, 3};
8181
8182 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8183
8184 std::vector<uint8_t> expectedOutput({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8185
8186 std::vector<unsigned int> blockShape({2, 2});
8187 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8188
8189 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8190 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8191 crops, outputShape, expectedOutput);
8192}
8193
8194
8195LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
8196 armnn::IWorkloadFactory &workloadFactory,
8197 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8198{
8199 const unsigned int inputShape[] = {4, 3, 1, 1};
8200 const unsigned int outputShape[] = {1, 3, 2, 2};
8201
8202 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8203
8204 std::vector<uint8_t> expectedOutput
8205 ({
8206 // Batch 0, Channel 0, Height (2) x Width (2)
8207 1, 4,
8208 7, 10,
8209
8210 // Batch 0, Channel 1, Height (2) x Width (2)
8211 2, 5,
8212 8, 11,
8213
8214 // Batch 0, Channel 2, Height (2) x Width (2)
8215 3, 6,
8216 9, 12,
8217 });
8218
8219 std::vector<unsigned int> blockShape({2, 2});
8220 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8221
8222 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8223 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8224 crops, outputShape, expectedOutput);
8225}
8226
8227LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
8228 armnn::IWorkloadFactory& workloadFactory,
8229 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8230{
8231 const unsigned int inputShape[] = {4, 1, 1, 1};
8232 const unsigned int outputShape[] = {1, 1, 2, 2};
8233
8234 std::vector<uint8_t> input
8235 ({
8236 // Batch 0, Height 0, Width (2) x Channel (1)
8237 1, 2, 3, 4
8238 });
8239
8240 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8241
8242 std::vector<unsigned int> blockShape({2, 2});
8243 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8244
8245 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8246 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8247 crops, outputShape, expectedOutput);
8248}
8249
8250LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
8251 armnn::IWorkloadFactory& workloadFactory,
8252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8253{
8254 const unsigned int inputShape[] = {4, 3, 1, 1};
8255 const unsigned int outputShape[] = {1, 3, 2, 2};
8256
8257 std::vector<uint8_t> input({ 1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12 });
8258
8259 std::vector<uint8_t> expectedOutput
8260 ({
8261 // Batch 0, Channel 0, Height (2) x Width (2)
8262 1, 7,
8263 2, 8,
8264
8265 // Batch 0, Channel 1, Height (2) x Width (2)
8266 3, 9,
8267 4, 10,
8268
8269 // Batch 0, Channel 2, Height (2) x Width (2)
8270 5, 11,
8271 6, 12,
8272 });
8273 std::vector<unsigned int> blockShape({2, 2});
8274 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8275
8276 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8277 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8278 crops, outputShape, expectedOutput);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00008279}
8280
8281LayerTestResult<float, 4> Debug4DFloat32Test(
8282 armnn::IWorkloadFactory& workloadFactory,
8283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8284{
8285 return Debug4DTest<float>(workloadFactory, memoryManager);
8286}
8287
8288LayerTestResult<float, 3> Debug3DFloat32Test(
8289 armnn::IWorkloadFactory& workloadFactory,
8290 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8291{
8292 return Debug3DTest<float>(workloadFactory, memoryManager);
8293}
8294
8295LayerTestResult<float, 2> Debug2DFloat32Test(
8296 armnn::IWorkloadFactory& workloadFactory,
8297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8298{
8299 return Debug2DTest<float>(workloadFactory, memoryManager);
8300}
8301
8302LayerTestResult<float, 1> Debug1DFloat32Test(
8303 armnn::IWorkloadFactory& workloadFactory,
8304 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8305{
8306 return Debug1DTest<float>(workloadFactory, memoryManager);
8307}
8308
8309LayerTestResult<uint8_t, 4> Debug4DUint8Test(
8310 armnn::IWorkloadFactory& workloadFactory,
8311 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8312{
8313 return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
8314}
8315
8316LayerTestResult<uint8_t, 3> Debug3DUint8Test(
8317 armnn::IWorkloadFactory& workloadFactory,
8318 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8319{
8320 return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
8321}
8322
8323LayerTestResult<uint8_t, 2> Debug2DUint8Test(
8324 armnn::IWorkloadFactory& workloadFactory,
8325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8326{
8327 return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
8328}
8329
8330LayerTestResult<uint8_t, 1> Debug1DUint8Test(
8331 armnn::IWorkloadFactory& workloadFactory,
8332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8333{
8334 return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
8335}