blob: 4dc49f97a2ff9748c0c5b135e34d53053d166f6d [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
9#include "test/TensorHelpers.hpp"
10#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010011#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000012
13#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010014#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000015
David Beck711fa312018-09-24 10:46:38 +010016#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000017
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000018#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000019#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000020#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000021
Éanna Ó Catháinde705582018-12-03 13:04:22 +000022#include <reference/workloads/RefWorkloads.hpp>
23
telsoa014fcda012018-03-09 14:13:49 +000024#include <algorithm>
25#include <boost/cast.hpp>
26
27#include "WorkloadTestUtils.hpp"
28#include "Conv2dTestImpl.hpp"
29#include "BatchNormTestImpl.hpp"
30#include "ActivationTestImpl.hpp"
31#include "Pooling2dTestImpl.hpp"
32#include "ReshapeTestImpl.hpp"
33#include "FullyConnectedTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000034#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000035#include "SplitterTestImpl.hpp"
36#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000037#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000038#include "NormTestImpl.hpp"
39#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010040#include "LstmTestImpl.hpp"
41#include "ConvertFp16ToFp32TestImpl.hpp"
42#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000043#include "DebugTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000044
telsoa01c577f2c2018-08-31 09:22:23 +010045// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
telsoa014fcda012018-03-09 14:13:49 +000046static std::vector<float> ConvInput3x8x16({
47 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
48 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
49 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
50 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
51 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
52 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
53 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
54 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
55 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
56 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
57 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
58 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
59 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
60 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
61 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
62 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
63 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
64 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
65 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
66 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
67 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
68 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
69 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
70 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
71});
72
// Bias values shared by the Conv2d tests that produce two output channels.
static std::vector<float> Bias2 = {0, 2};
75
telsoa01c577f2c2018-08-31 09:22:23 +010076// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
telsoa014fcda012018-03-09 14:13:49 +000077template<typename T>
78boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
79{
80 if(biasEnabled)
81 {
82 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
83 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
84 return bias;
85 }
86 else
87 {
88 return boost::multi_array<T, 1>();
89 }
90}
91
// Common implementation for the 3x5-kernel Conv2d tests: convolves the shared
// 3-channel 16x8 input (ConvInput3x8x16) with a 2-element batch of 3-channel
// 3x5 kernels and compares against a precomputed expected output.
// qScale/qOffset quantize the input, kernel, bias and expected output;
// biasEnabled adds Bias2 to the two output channels; layout selects NCHW/NHWC.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 0, channel 1 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, channel 0 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          memoryManager,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}
174
// Common implementation for the 3x3-kernel Conv2d tests.
// The 3x3 kernel size exercises ArmCompute's direct convolution path.
// qScale/qOffset quantize all tensor data; biasEnabled adds Bias2 to the two
// output channels; layout selects NCHW/NHWC.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            // Kernel 0, channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            // Kernel 0, channel 1 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 0, channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Kernel 1, channel 0 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Kernel 1, channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Kernel 1, channel 2 (all zero).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          memoryManager,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout);
}
249
// NHWC-layout Conv2d test: a single 1-channel 3(H)x4(W) image convolved with
// one 1-channel 3x3 kernel, no bias.
// NOTE(review): biasEnabled is currently ignored — an empty bias tensor is
// always passed to the impl; confirm whether a biased NHWC variant is wanted.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch 1-channel 3x4 image in NHWC order ({1, 3, 4, 1}).

    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
                                                      {
                                                       1, 5, 2, 3,
                                                       8, 7, 3, 6,
                                                       3, 3, 9, 1
                                                      });


    // A single 1-channel 3x3 kernel ({1, 3, 3, 1}).
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
                                                                    4, 5, 6,
                                                                    0, 0, 0,
                                                                    3, 2, 1
                                                                   });

    // Expected output is 1 batch of a 1-channel 3x4 image (same spatial size as the input).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              memoryManager,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(), // No bias.
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000301LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
302 armnn::IWorkloadFactory& workloadFactory,
303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
304 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000305 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000306{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000307 return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000308}
309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000310LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
313 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000314 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000315{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000316 return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000317}
318
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000319LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
320 armnn::IWorkloadFactory& workloadFactory,
321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
322 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000323 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000325 return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000326}
327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000328LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
329 armnn::IWorkloadFactory& workloadFactory,
330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
331 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100332{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000333 return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
334 memoryManager,
335 0.f,
336 0,
337 biasEnabled,
338 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100339}
340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000341LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
342 armnn::IWorkloadFactory& workloadFactory,
343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
344 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000345 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000346{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000347 return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000348}
349
// Conv2d test whose asymmetric padding (left=1, top=2, right=3, bottom=4) is
// larger than half the 2x2 kernel size on two sides, producing an output that
// is mostly zero-padding around a small convolved region.
template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    return SimpleConvolution2dTestImpl<T>(workloadFactory,
                                          memoryManager,
                                          input,
                                          kernel,
                                          GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
                                          expectedOutput,
                                          qScale,
                                          qOffset,
                                          layout,
                                          1, // Padding left.
                                          2, // Padding top.
                                          3, // Padding right.
                                          4); // Padding bottom.
}
411
412template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000413LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
414 armnn::IWorkloadFactory& workloadFactory,
415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000416 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000417 float qScale,
418 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000419{
telsoa01c577f2c2018-08-31 09:22:23 +0100420 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000421 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
422 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
423 QuantizedVector<T>(qScale, qOffset, {
424 11,21,31,41,51,
425 12,22,32,42,52,
426 13,23,33,43,53,
427 14,24,34,44,54,
428 15,25,35,45,55,
429 })));
430
telsoa01c577f2c2018-08-31 09:22:23 +0100431 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000432 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
433 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
434 QuantizedVector<T>(qScale, qOffset, {
435 -11,-21,-31,-41,
436 -12,-22,-32,-42,
437 -13,-23,-33,-43,
438 -14,-24,-34,-44,
439 })));
440
telsoa01c577f2c2018-08-31 09:22:23 +0100441 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000442 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
443 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
444 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
445 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000446 -7140, -10580, -13940, -9300, -5230,
447 -9590, -14120, -18520, -12290, -6860,
448 -9980, -14560, -18960, -12560, -7000,
449 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100450 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000451 })));
452
453 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000454 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000455 input,
456 kernel,
457 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
458 expectedOutput,
459 qScale,
460 qOffset,
narpra015f703182018-10-26 16:24:58 +0100461 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100462 1, // Padding left.
463 1, // Padding top.
464 2, // Padding right.
465 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100466}
467
// Depthwise Conv2d test with asymmetric padding (left=1, top=1, right=2,
// bottom=2), stride 1x1, depth multiplier 1: a 2-channel 5x5 input with a
// 2-channel 4x4 kernel.
// NOTE(review): the input/kernel/output data are quantized with each
// TensorInfo's own (default-constructed) quantization parameters, not with the
// qScale/qOffset arguments — those are only forwarded to GetBias2 and the
// impl. Confirm this is intentional for the quantized variants.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            // Channel 1.
            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            // Channel 1.
            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            // Channel 0.
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            // Channel 1.
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
                                                       memoryManager,
                                                       input,
                                                       kernel,
                                                       GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                       expectedOutput,
                                                       qScale,
                                                       qOffset,
                                                       layout,
                                                       1, // Padding left.
                                                       1, // Padding top.
                                                       2, // Padding right.
                                                       2, // Padding bottom.
                                                       1, // strideX
                                                       1); // strideY
}
542
// NHWC-layout counterpart of DepthwiseConvolution2dAsymmetricTestCommon:
// the same 2-channel 5x5 input, 2-channel 4x4 kernel, padding (1,1,2,2) and
// stride 1x1, but with data stored channels-last (values interleaved per
// pixel).
// NOTE(review): as in the NCHW variant, the tensor data are quantized with the
// TensorInfos' default quantization parameters, not with qScale/qOffset —
// confirm intentional.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Single-batch 5x5 image with 2 channels, NHWC: each row below is one
    // pixel's (channel0, channel1) pair.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // 4x4 kernel with 2 channels, NHWC (depth multiplier 1).
    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25, 9,

            24, 8,
            23, 7,
            22, 6,
            21, 5,

            20, 4,
            19, 3,
            18, 2,
            17, 1
        })));

    // Expected output: 5x5 image with 2 channels, NHWC. Same values as the
    // NCHW variant's expected output, interleaved per pixel.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
                                                 memoryManager,
                                                 input,
                                                 kernel,
                                                 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
                                                 expectedOutput,
                                                 qScale,
                                                 qOffset,
                                                 1, // Padding left.
                                                 1, // Padding top.
                                                 2, // Padding right.
                                                 2, // Padding bottom.
                                                 1, // strideX
                                                 1); // strideY
}
658
telsoa014fcda012018-03-09 14:13:49 +0000659LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000660Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
661 armnn::IWorkloadFactory& workloadFactory,
662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000663 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000664{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000665 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
666 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000667}
668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000669LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
670 armnn::IWorkloadFactory& workloadFactory,
671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000672 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000673{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000674 return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
675 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000676}
677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000678LayerTestResult<float, 4> DepthwiseConvolution2dTest(
679 armnn::IWorkloadFactory& workloadFactory,
680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
681 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000682 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000683{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000684 return DepthwiseConvolution2dTestImpl<float, float>(
685 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000686}
687
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000688LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
689 armnn::IWorkloadFactory& workloadFactory,
690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
691 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100692{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000693 return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100694}
695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000696LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
697 armnn::IWorkloadFactory& workloadFactory,
698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
699 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000700 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000701{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000702 return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
703 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000704}
705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000706LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
707 armnn::IWorkloadFactory& workloadFactory,
708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
709 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000710 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100711{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000712 return DepthwiseConvolution2dAsymmetricTestCommon<float>(
713 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100714}
715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000716LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
717 armnn::IWorkloadFactory& workloadFactory,
718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
719 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000720 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000721{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000722 return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
723 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000724}
725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000726LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
727 armnn::IWorkloadFactory& workloadFactory,
728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
729 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000730 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000731{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000732 return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
733 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000734}
735
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000736LayerTestResult<float, 4> Convolution1dTest(
737 armnn::IWorkloadFactory& workloadFactory,
738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
739 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000740{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000741 return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000742}
743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000744LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
745 armnn::IWorkloadFactory& workloadFactory,
746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
747 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000748{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000749 return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000750}
751
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000752LayerTestResult<float,4> CompareConvolution2dTest(
753 armnn::IWorkloadFactory& workloadFactory,
754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
755 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000756{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000757 return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000758}
759
760template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000761LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
762 armnn::IWorkloadFactory& workloadFactory,
763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
764 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000765 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000766{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000767 return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000768}
769
// Explicit instantiations of CompareDepthwiseConvolution2dTest for the two data
// types exercised by the test suite, so the template definition above can live
// in this translation unit and still link from the per-backend test files.
template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);

template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000781
782LayerTestResult<float,4> SimpleNormalizationAcrossTest(
783 armnn::IWorkloadFactory& workloadFactory,
784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000785{
786 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
787 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000788 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000789}
790
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000791LayerTestResult<float,4> SimpleNormalizationWithinTest(
792 armnn::IWorkloadFactory& workloadFactory,
793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000794{
795 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
796 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000797 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000798}
799
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000800LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
801 armnn::IWorkloadFactory& workloadFactory,
802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100803{
804 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
805 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000806 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100807}
808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000809LayerTestResult<float,2> SimpleSoftmaxTest(
810 armnn::IWorkloadFactory& workloadFactory,
811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
812 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000813{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000814 return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000815}
816
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000817LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
818 armnn::IWorkloadFactory& workloadFactory,
819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
820 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000821{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000822 return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000823}
824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000825LayerTestResult<float,4> CompareNormalizationTest(
826 armnn::IWorkloadFactory& workloadFactory,
827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
828 armnn::IWorkloadFactory& refWorkloadFactory,
829 armnn::NormalizationAlgorithmChannel normChannel,
830 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000831{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000832 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000833}
834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000835LayerTestResult<float,2> CompareSoftmaxTest(
836 armnn::IWorkloadFactory& workloadFactory,
837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000838 armnn::IWorkloadFactory& refWorkloadFactory,
839 float beta)
840{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000841 return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000842}
843
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000844LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
845 armnn::IWorkloadFactory& workloadFactory,
846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000847 armnn::IWorkloadFactory& refWorkloadFactory,
848 float beta)
849{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850 return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000851}
852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000853std::vector<LayerTestResult<float,3>> SplitterTest(
854 armnn::IWorkloadFactory& workloadFactory,
855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000856{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857 return SplitterTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000858}
859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000860std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
861 armnn::IWorkloadFactory& workloadFactory,
862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000863{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000864 return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000865}
866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867LayerTestResult<float, 3> CopyViaSplitterTest(
868 armnn::IWorkloadFactory& workloadFactory,
869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000870{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000871 return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000872}
873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000874LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
875 armnn::IWorkloadFactory& workloadFactory,
876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000877{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000878 return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000879}
880
telsoa01c577f2c2018-08-31 09:22:23 +0100881LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000882 armnn::IWorkloadFactory& workloadFactory,
883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100884{
885 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
886 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
887 { 2., 3., 3., 4. }));
888
889 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
890 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
891 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
892 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000893 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
894 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100895}
896
// LSTM without CIFG, with peephole connections and a projection layer.
// Golden input/output values; the workload setup lives in the *TestImpl helper.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input: 2 batches x 5 features.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected output: 2 batches x 16 projected output units (pre-computed reference values).
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
             0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
}
917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000918LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100921{
922 armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
923 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
924 {2., 3., 3., 4.}));
925
926
927 armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
928 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
929 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
930 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000932 return LstmNoCifgNoPeepholeNoProjectionTestImpl(
933 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100934}
935
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000936LayerTestResult<float,3> MergerTest(
937 armnn::IWorkloadFactory& workloadFactory,
938 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000939{
surmeh013537c2c2018-05-18 16:31:43 +0100940 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +0000941 unsigned int outputHeight = 6;
942 unsigned int outputChannels = 3;
943
surmeh013537c2c2018-05-18 16:31:43 +0100944 unsigned int inputWidth1 = 3;
945 unsigned int inputHeight1 = 6;
946 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +0000947
surmeh013537c2c2018-05-18 16:31:43 +0100948 unsigned int inputWidth2 = 3;
949 unsigned int inputHeight2 = 6;
950 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +0000951
telsoa01c577f2c2018-08-31 09:22:23 +0100952 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +0000953 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
954 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
955 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +0000956
957 LayerTestResult<float,3> ret(outputTensorInfo);
958
telsoa014fcda012018-03-09 14:13:49 +0000959 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +0100960 {
961 1.0f, 2.0f, 3.0f,
962 4.0f, 5.0f, 6.0f,
963 7.0f, 8.0f, 9.0f,
964 10.0f, 11.0f, 12.0f,
965 13.0f, 14.0f, 15.0f,
966 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +0000967
surmeh013537c2c2018-05-18 16:31:43 +0100968 19.0f, 20.0f, 21.0f,
969 22.0f, 23.0f, 24.0f,
970 25.0f, 26.0f, 27.0f,
971 28.0f, 29.0f, 30.0f,
972 31.0f, 32.0f, 33.0f,
973 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +0000974
surmeh013537c2c2018-05-18 16:31:43 +0100975 37.0f, 38.0f, 39.0f,
976 40.0f, 41.0f, 42.0f,
977 43.0f, 44.0f, 45.0f,
978 46.0f, 47.0f, 48.0f,
979 49.0f, 50.0f, 51.0f,
980 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +0000981 })
982 );
983
telsoa014fcda012018-03-09 14:13:49 +0000984 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
985 {
surmeh013537c2c2018-05-18 16:31:43 +0100986 1.0f, 2.0f, 3.0f,
987 4.0f, 5.0f, 6.0f,
988 7.0f, 8.0f, 9.0f,
989 10.0f, 11.0f, 12.0f,
990 13.0f, 14.0f, 15.0f,
991 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +0000992
surmeh013537c2c2018-05-18 16:31:43 +0100993 19.0f, 20.0f, 21.0f,
994 22.0f, 23.0f, 24.0f,
995 25.0f, 26.0f, 27.0f,
996 28.0f, 29.0f, 30.0f,
997 31.0f, 32.0f, 33.0f,
998 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +0000999 })
1000 );
1001
1002 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1003 {
surmeh013537c2c2018-05-18 16:31:43 +01001004 37.0f, 38.0f, 39.0f,
1005 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001006 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001007 46.0f, 47.0f, 48.0f,
1008 49.0f, 50.0f, 51.0f,
1009 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001010 })
1011 );
1012
telsoa01c577f2c2018-08-31 09:22:23 +01001013 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00001014 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
1015
telsoa01c577f2c2018-08-31 09:22:23 +01001016 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00001017 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
1018
telsoa014fcda012018-03-09 14:13:49 +00001019 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1020
1021 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1022
1023 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1024 subTensorsSupported ?
1025 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1026 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1027
1028 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1029 subTensorsSupported ?
1030 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1031 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1032
telsoa014fcda012018-03-09 14:13:49 +00001033 armnn::MergerQueueDescriptor data;
1034 armnn::WorkloadInfo info;
1035 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1036 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001037 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1038
1039 data.m_ViewOrigins.push_back(window1);
1040 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001041
1042 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
1043
1044 inputHandle1->Allocate();
1045 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001046 outputHandle->Allocate();
1047
1048 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1049 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001050
1051 workload->Execute();
1052
1053 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1054
1055 return ret;
1056}
1057
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001058LayerTestResult<float,4> AdditionTest(
1059 armnn::IWorkloadFactory& workloadFactory,
1060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001061{
1062 unsigned int batchSize = 2;
1063 unsigned int channels = 2;
1064 unsigned int height = 2;
1065 unsigned int width = 3;
1066
1067 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1068 armnn::TensorInfo outputTensorInfo;
1069
1070 unsigned int shape[] = {batchSize, channels, height, width};
1071
1072 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1073 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1074 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1075
1076
1077 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1078 {
1079 0.0f, 2.0f, 1.0f,
1080 0.2f, 1.0f, 2.0f,
1081
1082 1.0f, 2.0f, 1.0f,
1083 0.2f, 1.0f, 2.0f,
1084
1085 0.0f, 2.0f, 1.0f,
1086 4.2f, 1.0f, 2.0f,
1087
1088 0.0f, 0.0f, 1.0f,
1089 0.2f, 1.0f, 2.0f,
1090 }));
1091
1092 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1093 {
1094 1.0f, 2.0f, 1.0f,
1095 0.0f, 1.0f, 2.0f,
1096
1097 1.0f, 2.0f, -2.0f,
1098 0.2f, 1.0f, 2.0f,
1099
1100 0.0f, 2.0f, 1.0f,
1101 4.2f, 0.0f, -3.0f,
1102
1103 0.0f, 0.0f, 1.0f,
1104 0.7f, 1.0f, 5.0f,
1105 }));
1106
1107 LayerTestResult<float,4> ret(outputTensorInfo);
1108 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1109 {
1110 1.0f, 4.0f, 2.0f,
1111 0.2f, 2.0f, 4.0f,
1112
1113 2.0f, 4.0f, -1.0f,
1114 0.4f, 2.0f, 4.0f,
1115
1116 0.0f, 4.0f, 2.0f,
1117 8.4f, 1.0f, -1.0f,
1118
1119 0.0f, 0.0f, 2.0f,
1120 0.9f, 2.0f, 7.0f,
1121 }));
1122
1123 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1124 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1125 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1126
1127 armnn::AdditionQueueDescriptor data;
1128 armnn::WorkloadInfo info;
1129 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1130 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1131 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1132
1133 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1134
1135 inputHandle1->Allocate();
1136 inputHandle2->Allocate();
1137 outputHandle->Allocate();
1138
1139 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1140 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1141
1142 workload->Execute();
1143
1144 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1145
1146 return ret;
1147}
1148
// Broadcast addition: a (1,3,2,1) tensor plus a (1,1,2,3) tensor yielding
// (1,3,2,3) — the two inputs are broadcast against each other along the
// mismatched dimensions. T is float or uint8_t; qScale/qOffset configure the
// quantization info when T is a quantized type (ignored for float).
template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Only quantized types carry scale/offset; skip for float.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    // Expected: every input1 element broadcast-added to the input2 plane.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; execute before reading the result out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1226
// Broadcast addition of a single scalar-like (1,1,1,1) tensor to a (1,3,2,3)
// tensor: every element gains 0.5. T is float or uint8_t; qScale/qOffset
// configure the quantization info when T is a quantized type.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Only quantized types carry scale/offset; skip for float.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    // Expected: input1 with 0.5 added to every element.
    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Allocate before copying data in; execute before reading the result out.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001300LayerTestResult<float, 4> AdditionBroadcastTest(
1301 armnn::IWorkloadFactory& workloadFactory,
1302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001303{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001304 return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001305}
1306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001307LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1308 armnn::IWorkloadFactory& workloadFactory,
1309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001310{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001311 return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001312}
1313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001314LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1315 armnn::IWorkloadFactory& workloadFactory,
1316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001317{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001318 return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001319}
1320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001321LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1322 armnn::IWorkloadFactory& workloadFactory,
1323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001325 return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001326}
1327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001328LayerTestResult<float,4> CompareAdditionTest(
1329 armnn::IWorkloadFactory& workloadFactory,
1330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1331 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001332{
1333 unsigned int batchSize = 4;
1334 unsigned int channels = 1;
1335 unsigned int height = 2;
1336 unsigned int width = 3;
1337
1338 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1339 armnn::TensorInfo outputTensorInfo;
1340
1341 unsigned int shape[] = {batchSize, channels, height, width};
1342
1343 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1344 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1345 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1346
1347 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1348 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1349
1350 LayerTestResult<float,4> ret(outputTensorInfo);
1351
1352 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1353 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1354 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1355
1356 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1357 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1358 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1359
1360 armnn::AdditionQueueDescriptor data;
1361 armnn::WorkloadInfo info;
1362 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1363 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1364 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1365
1366 armnn::AdditionQueueDescriptor refData = data;
1367 armnn::WorkloadInfo refInfo = info;
1368 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1369 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1370 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1371
1372 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1373 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1374
1375 inputHandle1->Allocate();
1376 inputHandle2->Allocate();
1377 outputHandle->Allocate();
1378 inputHandle1Ref->Allocate();
1379 inputHandle2Ref->Allocate();
1380 outputHandleRef->Allocate();
1381
1382 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1383 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1384 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1385 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1386
1387 workload->Execute();
1388 workloadRef->Execute();
1389
1390 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1391 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1392
1393 return ret;
1394}
1395
namespace {

// Builds, runs and validates a single Division workload on the given backend.
//
// Two 4-D input tensors (shape0/shape1) are divided element-wise; the caller
// supplies the expected result (outValues) along with per-tensor quantization
// parameters. T == uint8_t selects the quantised asymmetric 8-bit data type,
// any other T is treated as float32.
//
// Returns a LayerTestResult holding both the actual output read back from the
// backend and the caller-provided expected output.
//
// NOTE(review): memoryManager is accepted for API uniformity with the other
// test helpers but is not referenced in this body.
template <typename T>
LayerTestResult<T, 4> DivisionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Pick the tensor data type from the template parameter.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    // Quantization parameters are set unconditionally; for float32 tensors
    // they are simply carried along unused by the computation.
    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // result.output is filled in after execution; outputExpected carries the
    // reference values supplied by the caller.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::DivisionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);

    // Handles must be allocated before data can be copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
1463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001464LayerTestResult<float,4> DivisionByZeroTest(
1465 armnn::IWorkloadFactory& workloadFactory,
1466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001467{
1468 const unsigned int width = 2;
1469 const unsigned int height = 2;
1470 const unsigned int channelCount = 2;
1471 const unsigned int batchSize = 2;
1472
1473 unsigned int shape[] = { batchSize, channelCount, height, width };
1474
1475 std::vector<float> input0({
1476 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1477 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1478
1479 std::vector<float> input1({
1480 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1481 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1482
1483 std::vector<float> output({
1484 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1485 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1486
David Beck5cd01f32018-09-12 16:00:08 +01001487 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001488 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001489 shape, input0, 1.0f, 0,
1490 shape, input1, 1.0f, 0,
1491 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001492}
1493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001494LayerTestResult<float,4> DivisionTest(
1495 armnn::IWorkloadFactory& workloadFactory,
1496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001497{
1498 const unsigned int width = 2;
1499 const unsigned int height = 2;
1500 const unsigned int channelCount = 2;
1501 const unsigned int batchSize = 2;
1502
1503 unsigned int shape[] = { batchSize, channelCount, height, width };
1504
1505 std::vector<float> input0({
1506 2, 2, 2, 2, 3, 3, 3, 3,
1507 4, 4, 4, 4, 5, 5, 5, 5 });
1508
1509 std::vector<float> input1({
1510 1, 1, 1, 1, 2, 2, 2, 2,
1511 4, 4, 4, 4, 4, 4, 4, 4 });
1512
1513 std::vector<float> output({
1514 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1515 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1516
David Beck5cd01f32018-09-12 16:00:08 +01001517
1518 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001519 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001520 shape, input0, 1.0f, 0,
1521 shape, input1, 1.0f, 0,
1522 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001523}
1524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001525LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1526 armnn::IWorkloadFactory& workloadFactory,
1527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001528{
1529 unsigned int shape0[] = { 1, 2, 2, 2 };
1530 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1531
1532 unsigned int shape1[] = { 1, 1, 1, 1 };
1533 std::vector<float> input1({ 2 });
1534
1535 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1536
David Beck5cd01f32018-09-12 16:00:08 +01001537
1538 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001539 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001540 shape0, input0, 1.0f, 0,
1541 shape1, input1, 1.0f, 0,
1542 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001543}
1544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001545LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1546 armnn::IWorkloadFactory& workloadFactory,
1547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001548{
1549 unsigned int shape0[] = { 1, 3, 3, 2 };
1550 std::vector<float> input0({
1551 1, 4, 3, 8, 5, 12,
1552 7, 16, 9, 20, 11, 24,
1553 13, 28, 15, 32, 17, 36});
1554
1555 unsigned int shape1[] = { 1, 1, 1, 2 };
1556 std::vector<float> input1({ 1, 2 });
1557
1558 std::vector<float> output({
1559 1, 2, 3, 4, 5, 6,
1560 7, 8, 9, 10, 11, 12,
1561 13, 14, 15, 16, 17, 18});
1562
David Beck5cd01f32018-09-12 16:00:08 +01001563 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001564 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001565 shape0, input0, 1.0f, 0,
1566 shape1, input1, 1.0f, 0,
1567 shape0, output, 1.0f, 0);
1568}
1569
1570
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001571LayerTestResult<uint8_t,4> DivisionUint8Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001574{
1575 const unsigned int width = 2;
1576 const unsigned int height = 2;
1577 const unsigned int channelCount = 2;
1578 const unsigned int batchSize = 2;
1579
1580 unsigned int shape[] = { batchSize, channelCount, height, width };
1581
1582 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1583 4, 4, 4, 4, 5, 5, 5, 5 });
1584
1585 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1586 4, 4, 4, 4, 4, 4, 4, 4 });
1587
1588 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1589 4, 4, 4, 4, 5, 5, 5, 5});
1590
1591
1592 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001593 memoryManager,
1594 shape, input0, 1.0f, 0,
1595 shape, input1, 1.0f, 0,
1596 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001597}
1598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001599LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1600 armnn::IWorkloadFactory& workloadFactory,
1601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001602{
1603 unsigned int shape0[] = { 1, 2, 2, 2 };
1604 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1605
1606 unsigned int shape1[] = { 1, 1, 1, 1 };
1607 std::vector<uint8_t> input1({ 2 });
1608
1609 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1610
1611 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001612 memoryManager,
1613 shape0, input0, 1.0f, 0,
1614 shape1, input1, 1.0f, 0,
1615 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001616}
1617
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001618LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1619 armnn::IWorkloadFactory& workloadFactory,
1620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001621{
1622 unsigned int shape0[] = { 1, 3, 3, 2 };
1623 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1624 7, 16, 9, 20, 11, 24,
1625 13, 28, 15, 32, 17, 36});
1626
1627 unsigned int shape1[] = { 1, 1, 1, 2 };
1628 std::vector<uint8_t> input1({ 1, 2 });
1629
1630 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1631 7, 8, 9, 10, 11, 12,
1632 13, 14, 15, 16, 17, 18});
1633
1634 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001635 memoryManager,
1636 shape0, input0, 1.0f, 0,
1637 shape1, input1, 1.0f, 0,
1638 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001639}
1640
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001641template<typename DescriptorType>
1642std::unique_ptr<armnn::IWorkload> CreateWorkload(
1643 const armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::WorkloadInfo& info,
1645 const DescriptorType& descriptor)
1646{
1647 return CreateWorkload(workloadFactory, info, descriptor);
1648};
1649
// Specialization: Maximum workloads are created via IWorkloadFactory::CreateMaximum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
1658
// Specialization: Minimum workloads are created via IWorkloadFactory::CreateMinimum.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
1667
// Specialization: Equal workloads are created via IWorkloadFactory::CreateEqual.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
1676
namespace {
    // Generic runner for binary element-wise workloads (Maximum, Minimum,
    // Equal, ...). The Descriptor type selects which layer is built via the
    // CreateWorkload specializations defined earlier in this file.
    //
    // Two 4-D inputs (shape0/shape1) are combined into an output of outShape;
    // the caller supplies the expected values (outValues). For quantised data
    // types, the same qScale/qOffset are applied to both inputs and to the
    // output. Returns a LayerTestResult holding actual and expected outputs.
    template <typename Descriptor, typename dataType>
    LayerTestResult<dataType, 4> ElementwiseTestHelper
        (armnn::IWorkloadFactory & workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
         const unsigned int shape0[4], std::vector<dataType> values0,
         const unsigned int shape1[4], std::vector<dataType> values1,
         const unsigned int outShape[4], std::vector<dataType> outValues,
         float qScale = 0.0f, int qOffset = 0)
    {
        const size_t dimensionCount = 4;
        armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
        armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
        armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};

        auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
        auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);

        // Quantization parameters are only meaningful for quantised types.
        // NOTE(review): they are set after MakeTensor but before the tensor
        // handles are created below, which is where they take effect —
        // presumably MakeTensor does not consume them; confirm if modifying.
        if (armnn::IsQuantizedType<dataType>())
        {
            inputTensorInfo0.SetQuantizationScale(qScale);
            inputTensorInfo0.SetQuantizationOffset(qOffset);

            inputTensorInfo1.SetQuantizationScale(qScale);
            inputTensorInfo1.SetQuantizationOffset(qOffset);

            outputTensorInfo.SetQuantizationScale(qScale);
            outputTensorInfo.SetQuantizationOffset(qOffset);
        }

        LayerTestResult<dataType,4> ret(outputTensorInfo);

        std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
        std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
        std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        // Two inputs, one output; the layer is chosen by Descriptor.
        Descriptor data;
        armnn::WorkloadInfo info;
        AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
        AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
        AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
        auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

        inputHandle0->Allocate();
        inputHandle1->Allocate();
        outputHandle->Allocate();

        CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
        CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

        // Unlike the older helpers in this file, execution goes through
        // ExecuteWorkload, which also takes the memory manager.
        ExecuteWorkload(*workload, memoryManager);

        CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

        ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
        return ret;
    }
}
1735
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00001736LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1737 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1738{
1739 const unsigned int width = 2;
1740 const unsigned int height = 2;
1741 const unsigned int channelCount = 2;
1742 const unsigned int batchSize = 2;
1743
1744 unsigned int shape[] = { batchSize, channelCount, height, width };
1745
1746 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1747 3, 3, 3, 3, 4, 4, 4, 4 });
1748
1749 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
1750 5, 5, 5, 5, 4, 4, 4, 4 });
1751
1752 std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
1753 0, 0, 0, 0, 1, 1, 1, 1 });
1754
1755 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1756 (workloadFactory,
1757 memoryManager,
1758 shape,
1759 input0,
1760 shape,
1761 input1,
1762 shape,
1763 output);
1764}
1765
1766LayerTestResult<float, 4> EqualBroadcast1ElementTest(
1767 armnn::IWorkloadFactory& workloadFactory,
1768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1769{
1770 unsigned int shape0[] = { 1, 2, 2, 2 };
1771 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1772
1773 unsigned int shape1[] = { 1, 1, 1, 1 };
1774 std::vector<float> input1({ 1 });
1775
1776 std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
1777
1778 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1779 (workloadFactory,
1780 memoryManager,
1781 shape0,
1782 input0,
1783 shape1,
1784 input1,
1785 shape0,
1786 output);
1787}
1788
1789LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
1790 armnn::IWorkloadFactory& workloadFactory,
1791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1792{
1793 const unsigned int shape0[] = { 1, 2, 2, 3 };
1794 const unsigned int shape1[] = { 1, 1, 1, 3 };
1795
1796 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1797 7, 8, 9, 10, 11, 12 });
1798
1799 std::vector<float> input1({ 1, 2, 3});
1800
1801 std::vector<float> output({ 1, 1, 1, 0, 0, 0,
1802 0, 0, 0, 0, 0, 0 });
1803
1804 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
1805 (workloadFactory,
1806 memoryManager,
1807 shape0,
1808 input0,
1809 shape1,
1810 input1,
1811 shape0,
1812 output);
1813}
1814
1815LayerTestResult<uint8_t, 4> EqualUint8Test(
1816 armnn::IWorkloadFactory& workloadFactory,
1817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1818{
1819 unsigned int shape[] = { 2, 2, 2, 2 };
1820
1821 // See dequantized values to the right.
1822 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
1823 3, 3, 3, 3, 5, 5, 5, 5 });
1824
1825 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
1826 3, 3, 3, 3, 5, 5, 5, 5 });
1827
1828 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1829 1, 1, 1, 1, 0, 0, 0, 0 });
1830
1831 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
1832 (workloadFactory,
1833 memoryManager,
1834 shape,
1835 input0,
1836 shape,
1837 input1,
1838 shape,
1839 output,
1840 1.0f,
1841 0);
1842}
1843
1844LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
1845 armnn::IWorkloadFactory& workloadFactory,
1846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1847{
1848 const unsigned int shape0[] = { 1, 2, 2, 3 };
1849 const unsigned int shape1[] = { 1, 1, 1, 1 };
1850
1851 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1852 7, 8, 9, 10, 11, 12 });
1853
1854 std::vector<uint8_t> input1({ 1 });
1855
1856 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
1857 0, 0, 0, 0, 0, 0 });
1858
1859 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
1860 (workloadFactory,
1861 memoryManager,
1862 shape0,
1863 input0,
1864 shape1,
1865 input1,
1866 shape0,
1867 output,
1868 1.0f,
1869 0);
1870}
1871
1872LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
1873 armnn::IWorkloadFactory& workloadFactory,
1874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1875{
1876 const unsigned int shape0[] = { 1, 2, 2, 3 };
1877 const unsigned int shape1[] = { 1, 1, 1, 3 };
1878
1879 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1880 7, 8, 9, 10, 11, 12 });
1881
1882 std::vector<uint8_t> input1({ 1, 1, 3});
1883
1884 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
1885 0, 0, 0, 0, 0, 0 });
1886
1887 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
1888 (workloadFactory,
1889 memoryManager,
1890 shape0,
1891 input0,
1892 shape1,
1893 input1,
1894 shape0,
1895 output,
1896 1.0f,
1897 0);
1898}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001899
1900LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1902{
1903 const unsigned int width = 2;
1904 const unsigned int height = 2;
1905 const unsigned int channelCount = 2;
1906 const unsigned int batchSize = 2;
1907
1908 unsigned int shape[] = { batchSize, channelCount, height, width };
1909
1910 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1911 3, 3, 3, 3, 4, 4, 4, 4 });
1912
1913 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
1914 4, 4, 4, 4, 5, 5, 5, 5 });
1915
1916 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
1917 4, 4, 4, 4, 5, 5, 5, 5 });
1918
1919 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1920 (workloadFactory,
1921 memoryManager,
1922 shape,
1923 input0,
1924 shape,
1925 input1,
1926 shape,
1927 output);
1928}
1929
1930LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
1931 armnn::IWorkloadFactory& workloadFactory,
1932 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1933{
1934 unsigned int shape0[] = { 1, 2, 2, 2 };
1935 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1936
1937 unsigned int shape1[] = { 1, 1, 1, 1 };
1938 std::vector<float> input1({ 2 });
1939
1940 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
1941
1942 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1943 (workloadFactory,
1944 memoryManager,
1945 shape0,
1946 input0,
1947 shape1,
1948 input1,
1949 shape0,
1950 output);
1951}
1952
1953LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
1954 armnn::IWorkloadFactory& workloadFactory,
1955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1956{
1957 const unsigned int shape0[] = { 1, 2, 2, 3 };
1958 const unsigned int shape1[] = { 1, 1, 1, 3 };
1959
1960 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1961 7, 8, 9, 10, 11, 12 });
1962
1963 std::vector<float> input1({ 1, 2, 3});
1964
1965 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
1966 7, 8, 9, 10, 11, 12 });
1967
1968 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1969 (workloadFactory,
1970 memoryManager,
1971 shape0,
1972 input0,
1973 shape1,
1974 input1,
1975 shape0,
1976 output);
1977}
1978
1979LayerTestResult<uint8_t, 4> MaximumUint8Test(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1982{
1983 unsigned int shape[] = { 2, 2, 2, 2 };
1984
1985 // See dequantized values to the right.
1986 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
1987 3, 3, 3, 3, 4, 4, 4, 4 });
1988
1989 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
1990 4, 4, 4, 4, 5, 5, 5, 5 });
1991
1992 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
1993 4, 4, 4, 4, 5, 5, 5, 5 });
1994
1995 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
1996 (workloadFactory,
1997 memoryManager,
1998 shape,
1999 input0,
2000 shape,
2001 input1,
2002 shape,
2003 output,
2004 1.0f,
2005 0);
2006}
2007
2008LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2009 armnn::IWorkloadFactory& workloadFactory,
2010 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2011{
2012 const unsigned int shape0[] = { 1, 2, 2, 3 };
2013 const unsigned int shape1[] = { 1, 1, 1, 1 };
2014
2015 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2016 7, 8, 9, 10, 11, 12 });
2017
2018 std::vector<uint8_t> input1({2});
2019
2020 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2021 7, 8, 9, 10, 11, 12 });
2022
2023 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
2024 (workloadFactory,
2025 memoryManager,
2026 shape0,
2027 input0,
2028 shape1,
2029 input1,
2030 shape0,
2031 output,
2032 1.0f,
2033 0);
2034}
2035
2036LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2037 armnn::IWorkloadFactory& workloadFactory,
2038 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2039{
2040 const unsigned int shape0[] = { 1, 2, 2, 3 };
2041 const unsigned int shape1[] = { 1, 1, 1, 3 };
2042
2043 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2044 7, 8, 9, 10, 11, 12 });
2045
2046 std::vector<uint8_t> input1({ 1, 10, 3});
2047
2048 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2049 7, 10, 9, 10, 11, 12 });
2050
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002051 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002052 (workloadFactory,
2053 memoryManager,
2054 shape0,
2055 input0,
2056 shape1,
2057 input1,
2058 shape0,
2059 output,
2060 1.0f,
2061 0);
2062}
2063
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002064LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2065 armnn::IWorkloadFactory& workloadFactory,
2066 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2067{
2068 unsigned int shape0[] = { 1, 2, 2, 2 };
2069 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2070
2071 unsigned int shape1[] = { 1, 1, 1, 1 };
2072 std::vector<float> input1({ 2 });
2073
2074 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2075
2076 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
2077 memoryManager,
2078 shape0,
2079 input0,
2080 shape1,
2081 input1,
2082 shape0,
2083 output);
2084}
2085
2086
2087LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2088 armnn::IWorkloadFactory& workloadFactory,
2089 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2090{
2091 unsigned int shape0[] = { 1, 2, 2, 2 };
2092 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2093
2094 unsigned int shape1[] = { 1, 1, 1, 1 };
2095 std::vector<float> input1({ 5 });
2096
2097 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2098
2099 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
2100 memoryManager,
2101 shape0,
2102 input0,
2103 shape1,
2104 input1,
2105 shape0,
2106 output);
2107}
2108
2109LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2110 armnn::IWorkloadFactory & workloadFactory,
2111 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2112{
2113 const unsigned int shape0[] = { 1, 2, 2, 3 };
2114 const unsigned int shape1[] = { 1, 1, 1, 3 };
2115
2116 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2117 7, 1, 2, 3, 4, 5 });
2118
2119 std::vector<uint8_t> input1({ 1, 2, 3});
2120
2121 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2122 1, 1, 2, 1, 2, 3 });
2123
2124 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
2125 memoryManager,
2126 shape0,
2127 input0,
2128 shape1,
2129 input1,
2130 shape0,
2131 output,
2132 1.0f,
2133 0);
2134}
2135
namespace {
// Builds, runs and validates a single float32 Multiplication workload.
//
// Multiplies two 4-D inputs (shape0/shape1) element-wise into an output of
// outShape and compares against the caller-provided expected values.
// Returns a LayerTestResult holding both actual and expected outputs.
//
// NOTE(review): memoryManager is accepted for API uniformity with the other
// test helpers but is not referenced in this body.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: two inputs, one output.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Handles must be allocated before data can be copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
2184
2185
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002186LayerTestResult<float,4> MultiplicationTest(
2187 armnn::IWorkloadFactory& workloadFactory,
2188 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002189{
2190 const unsigned int width = 2;
2191 const unsigned int height = 2;
2192 const unsigned int channelCount = 2;
2193 const unsigned int batchSize = 2;
2194
2195 unsigned int shape[] = { batchSize, channelCount, height, width };
2196
2197 std::vector<float> input0({
2198 1, 1, 1, 1, 2, 2, 2, 2,
2199 3, 3, 3, 3, 4, 4, 4, 4 });
2200
2201 std::vector<float> input1({
2202 2, 2, 2, 2, 3, 3, 3, 3,
2203 4, 4, 4, 4, 5, 5, 5, 5 });
2204
2205 std::vector<float> output({
2206 2, 2, 2, 2, 6, 6, 6, 6,
2207 12, 12, 12, 12, 20, 20, 20, 20 });
2208
2209 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002210 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002211 shape,
2212 input0,
2213 shape,
2214 input1,
2215 shape,
2216 output);
2217}
2218
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002219LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2220 armnn::IWorkloadFactory& workloadFactory,
2221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002222{
2223 unsigned int shape0[] = { 1, 2, 2, 2 };
2224 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2225
2226 unsigned int shape1[] = { 1, 1, 1, 1 };
2227 std::vector<float> input1({ 2 });
2228
2229 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2230
2231 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002232 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002233 shape0,
2234 input0,
2235 shape1,
2236 input1,
2237 shape0,
2238 output);
2239}
2240
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002241LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2242 armnn::IWorkloadFactory& workloadFactory,
2243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002244{
2245 unsigned int shape0[] = { 1, 3, 3, 2 };
2246 std::vector<float> input0({
2247 1, 2, 3, 4, 5, 6,
2248 7, 8, 9, 10, 11, 12,
2249 13, 14, 15, 16, 17, 18});
2250
2251 unsigned int shape1[] = { 1, 1, 1, 2 };
2252 std::vector<float> input1({ 1, 2 });
2253
2254 std::vector<float> output({
2255 1, 4, 3, 8, 5, 12,
2256 7, 16, 9, 20, 11, 24,
2257 13, 28, 15, 32, 17, 36});
2258
2259 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002260 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002261 shape0,
2262 input0,
2263 shape1,
2264 input1,
2265 shape0,
2266 output);
2267}
telsoa014fcda012018-03-09 14:13:49 +00002268
// Runs an identical Multiplication workload on the factory under test and on a
// reference factory with the same random inputs, and returns both outputs so
// the caller can compare them. 'memoryManager' is accepted for signature
// consistency with the other tests but is not used here.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random input data deterministic across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference descriptor is a copy whose handles are swapped for the
    // reference factory's handles; tensor infos stay identical.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Handles must be allocated before data can be copied into them.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result; 'outputExpected' the
    // reference backend result, for the caller to compare.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
2337
// Runs an identical BatchNormalization workload on the factory under test and
// on a reference factory with the same random inputs/parameters, returning
// both outputs for comparison. 'memoryManager' is accepted for signature
// consistency with the other tests but is not used here.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo; // Per-channel parameter tensors (mean/variance/beta/gamma).

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the test deterministic; variance uses a 0.0f lower
    // bound so it is never negative.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    // Parameter tensors live on the CPU and are shared by both workloads via
    // raw pointers in the descriptor; they must outlive workload execution.
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference descriptor copies everything (including the parameter
    // tensor pointers) and only swaps in the reference factory's handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2418
// Runs a Permute workload to rearrange 'inputData' according to 'mappings'.
// On return, 'outputData' holds the permuted elements and 'inputTensorInfo'
// is overwritten with the permuted tensor info (it is an in/out parameter).
// 'memoryManager' is unused here but kept for signature consistency.
template<typename T>
void PermuteTensorData(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::PermutationVector& mappings,
    armnn::TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Propagate the permuted shape back to the caller.
    inputTensorInfo = outputTensorInfo;
}
2461
2462armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
2463 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2464 unsigned int concatDim)
2465{
telsoa014fcda012018-03-09 14:13:49 +00002466 std::vector<armnn::TensorShape> shapes;
2467 shapes.reserve(inputTensorInfos.size());
2468 for (const armnn::TensorInfo& it: inputTensorInfos)
2469 {
2470 shapes.push_back(it.GetShape());
2471 }
surmeh013537c2c2018-05-18 16:31:43 +01002472
2473 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
2474 shapes.end(),
2475 concatDim);
2476}
2477
//
// Concatenation is only supported for N and C dimensions for NCHW and the inner most dimension.
// In case of <4 dimensions we need to make sure that the concat dimensions are at least
// the 3rd slowest iterating one or the inner most dimension.
//

// Returns true when the requested concat axis is not directly supported and
// the inputs must first be permuted (see note above). All inputs are expected
// to have the same rank; this is asserted in Debug builds.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // 1D/2D tensors always need a permute; 3D tensors need one unless the
    // concat axis is already the innermost dimension (nDimensions-concatDim == 1).
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
2509
2510armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2511{
2512 unsigned int numDims = inputShape.GetNumDimensions();
2513 if (numDims >= 3)
2514 {
2515 // Nothing to do if the inputShape has at least 3 dimensions.
2516 return inputShape;
2517 }
2518
2519 std::vector<unsigned int> newDims(size_t(3), 1u);
2520 unsigned int expandedBy = 3 - numDims;
2521 for (unsigned int i=0; i<numDims; ++i)
2522 {
2523 newDims[expandedBy+i] = inputShape[i];
2524 }
2525 return armnn::TensorShape(3u, &newDims[0]);
2526}
2527
// Computes the forward/reverse 3D permutations that rotate the concat axis to
// dimension 0 (the only axis the permuted concat path uses). 'concatDim' is
// an in/out parameter: on return it is always 0. 'permutations' receives the
// (forward, reverse) pair; it is left untouched when the axis is already 0.
void Generate3dPermuteVectorForConcat(
    unsigned int numDimensions,
    unsigned int & concatDim,
    std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
       "Only dimensions 1,2 and 3 are supported by this helper");
    // Treat lower-rank tensors as 3D with leading 1s, so shift the axis.
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({1, 2, 0});
        armnn::PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        armnn::PermutationVector forwardPermutation({2, 0, 1});
        armnn::PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        // Axis already at position 0 after expansion: no permutation needed.
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
2558
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// In/out parameters: inputTensorInfos and inputData are rewritten in place to
// their permuted forms (inputDataStorage owns the permuted buffers);
// permuteVector receives the reverse permutation needed to undo this later;
// concatDim is rewritten to the axis valid after permutation;
// outputTensorInfo's shape is rewritten to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input decides the permutation for all of them.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to 3D before permuting, so all inputs share a common rank.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2627
2628
//
// This is the pair of PermuteInputsForConcat(...) which permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
// 'inputDataHandle' is consumed (taken by rvalue reference); the un-permuted
// result is written into the caller-provided 'data' buffer, which must hold
// tensorInfo.GetNumElements() elements.
template <typename T>
void PermuteOutputForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo & tensorInfo,
    const armnn::PermutationVector & permuteVector,
    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    // Apply the reverse permutation computed by PermuteInputsForConcat.
    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
2667
2668template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002669void Concatenate(
2670 armnn::IWorkloadFactory& workloadFactory,
2671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2672 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2673 std::initializer_list<T *> inputsOrig,
2674 const armnn::TensorInfo& outputTensorInfoOrig,
2675 T * output,
narpra015cdda352018-11-19 15:30:27 +00002676 unsigned int concatDim,
2677 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01002678{
2679 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2680 if (output == nullptr)
2681 {
2682 // Nullptr is an error in the test. By returning without doing the permutation
2683 // I expect the caller to fail the test. It still makes sense to report this as
2684 // an assert for Debug builds.
2685 return;
2686 }
2687
telsoa01c577f2c2018-08-31 09:22:23 +01002688 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002689 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2690 std::vector<T *> inputs = inputsOrig;
2691 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2692
2693 armnn::PermutationVector permuteVector{0, 1, 2};
2694
telsoa01c577f2c2018-08-31 09:22:23 +01002695 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002696 std::vector<std::vector<T>> tmpInputDataStorage;
2697
2698 const size_t inputCount = inputTensorInfos.size();
2699
2700 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2701
2702 if (needPermuteForConcat)
2703 {
2704 //
2705 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002706 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002707 //
2708 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002709 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002710 inputTensorInfos,
2711 inputs,
2712 tmpInputDataStorage,
2713 permuteVector,
2714 concatDim,
2715 outputTensorInfo);
2716 }
2717
narpra015cdda352018-11-19 15:30:27 +00002718 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00002719
2720 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2721 inputHandles.reserve(inputCount);
2722
narpra015cdda352018-11-19 15:30:27 +00002723 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2724
2725 armnn::MergerQueueDescriptor queueDescriptor;
2726 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
2727 queueDescriptor.m_Parameters = viewsDescriptor;
2728
2729 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00002730 {
narpra015cdda352018-11-19 15:30:27 +00002731 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2732 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2733 {
2734 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2735 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2736 }
telsoa014fcda012018-03-09 14:13:49 +00002737
narpra015cdda352018-11-19 15:30:27 +00002738 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00002739
narpra015cdda352018-11-19 15:30:27 +00002740 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2741 for (unsigned int i = 0; i < inputCount; ++i)
2742 {
2743 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
2744 std::unique_ptr<armnn::ITensorHandle> inputHandle =
2745 subTensorsSupported ?
2746 workloadFactory.CreateSubTensorHandle(*outputHandle,
2747 inputTensorInfo.GetShape(),
2748 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
2749 workloadFactory.CreateTensorHandle(inputTensorInfo);
2750
2751 inputHandles.emplace_back(std::move(inputHandle));
2752 }
2753
telsoa014fcda012018-03-09 14:13:49 +00002754 }
narpra015cdda352018-11-19 15:30:27 +00002755 else
2756 {
2757 for (unsigned int i = 0; i < inputCount; ++i)
2758 {
2759 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
2760 inputHandles.emplace_back(std::move(inputHandle));
2761 }
2762 }
telsoa014fcda012018-03-09 14:13:49 +00002763
2764 for (unsigned int i = 0; i < inputCount; ++i)
2765 {
surmeh013537c2c2018-05-18 16:31:43 +01002766 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002767 }
2768
2769 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2770
2771 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2772
2773 for (auto& inputHandle : inputHandles)
2774 {
2775 inputHandle->Allocate();
2776 }
2777
2778 outputHandle->Allocate();
2779
2780 unsigned int nextInputId = 0;
2781 for (auto& inputHandle : inputHandles)
2782 {
surmeh013537c2c2018-05-18 16:31:43 +01002783 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2784 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002785 }
2786
2787 workload->Execute();
2788
surmeh013537c2c2018-05-18 16:31:43 +01002789 if (needPermuteForConcat)
2790 {
2791 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002792 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002793 outputTensorInfo,
2794 permuteVector,
2795 std::move(outputHandle),
2796 output);
2797 }
2798 else
2799 {
2800 CopyDataFromITensorHandle(output, outputHandle.get());
2801 }
telsoa014fcda012018-03-09 14:13:49 +00002802}
2803
// Concatenates three 1D tensors of 3 elements each along dimension 0 and
// checks the result is the 9-element sequence 1..9 (quantized per qScale/qOffset).
template <typename T>
LayerTestResult<T, 1> Concatenation1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));

    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,        // concat along dimension 0
                   true);    // use sub-tensors when the backend supports them

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
    }));

    return result;
}
2838
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002839LayerTestResult<float, 1> Concatenation1dTest(
2840 armnn::IWorkloadFactory& workloadFactory,
2841 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002842{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002843 return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002844}
2845
// Shared driver for the 2D concatenation tests: concatenates three fixed 2x3
// tensors along 'dimension' into 'outputTensorInfo'. The caller supplies the
// expected output (this function only fills result.output).
template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);    // use sub-tensors when the backend supports them

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
2896
// Concatenates the three 2x3 tensors along dimension 0 (batch), producing a
// 6x3 output that stacks the inputs one after another.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2930
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002931LayerTestResult<float, 2> Concatenation2dDim0Test(
2932 armnn::IWorkloadFactory& workloadFactory,
2933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002934{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002935 return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002936}
2937
// Concatenates the three 2x3 tensors along dimension 1 (the row contents),
// producing a 2x9 output where each batch's rows are joined side by side.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
    }));

    return result;
}
2959
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002960LayerTestResult<float, 2> Concatenation2dDim1Test(
2961 armnn::IWorkloadFactory& workloadFactory,
2962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002963{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002964 return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002965}
2966
// Concatenates three tensors with different batch counts (2x3, 3x3, 1x3)
// along dimension 0, producing a 6x3 output. Verifies that inputs of unequal
// size along the concat axis are handled correctly.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,        // concat along dimension 0
                   true);    // use sub-tensors when the backend supports them

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3037
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003038LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3039 armnn::IWorkloadFactory& workloadFactory,
3040 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003041{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003042 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003043}
3044
// Concatenates three 2D inputs with different widths ({2,3}, {2,5} and {2,1})
// along dimension 1, producing a single { 2, 9 } output whose rows interleave
// the corresponding rows of each input.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,      // concatenation dimension (width)
                   true);  // use sub-tensors where the backend supports them

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
3103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003104LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3105 armnn::IWorkloadFactory& workloadFactory,
3106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003107{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003108 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003109}
3110
// Shared driver for the 3D concatenation tests: concatenates three identical
// { 2, 3, 2 } inputs along 'dimension' into the caller-supplied output shape.
// Only 'result.output' is filled in here; the caller provides the expected
// values for its particular dimension.
template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3198
// Concatenates three { 2, 3, 2 } inputs along dimension 0 (batch) into a
// { 6, 3, 2 } output: the inputs are stacked batch-wise in argument order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3268
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003269LayerTestResult<float, 3> Concatenation3dDim0Test(
3270 armnn::IWorkloadFactory& workloadFactory,
3271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003272{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003273 return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003274}
3275
// Concatenates three { 2, 3, 2 } inputs along dimension 1 (channel) into a
// { 2, 9, 2 } output: per batch, the channels of each input follow in order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
3346
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003347LayerTestResult<float, 3> Concatenation3dDim1Test(
3348 armnn::IWorkloadFactory& workloadFactory,
3349 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003350{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003351 return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003352}
3353
// Concatenates three { 2, 3, 2 } inputs along dimension 2 (innermost) into a
// { 2, 3, 6 } output. 'useSubtensor' toggles whether the backend may implement
// the concat via sub-tensor views rather than copies.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
3389
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003390LayerTestResult<float, 3> Concatenation3dDim2Test(
3391 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3393 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003394{
narpra015cdda352018-11-19 15:30:27 +00003395 return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003396}
3397
// Concatenates three 3D inputs with different batch counts ({2,3,2}, {1,3,2}
// and {3,3,2}) along dimension 0, producing a { 6, 3, 2 } output in which the
// inputs' batches appear stacked in argument order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,      // concatenation dimension (batch)
                   true);  // use sub-tensors where the backend supports them

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3540
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003541LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3542 armnn::IWorkloadFactory& workloadFactory,
3543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003544{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003545 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003546}
3547
// Concatenates three 3D inputs with different channel counts ({2,3,2},
// {2,4,2} and {2,1,2}) along dimension 1, producing a { 2, 8, 2 } output in
// which, per batch, the inputs' channels follow each other in argument order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,      // concatenation dimension (channel)
                   true);  // use sub-tensors where the backend supports them

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
3678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003679LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
3680 armnn::IWorkloadFactory& workloadFactory,
3681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003682{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003683 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003684}
3685
// Concatenates three 3D inputs with different innermost widths ({2,3,2},
// {2,3,1} and {2,3,3}) along dimension 2, producing a { 2, 3, 6 } output in
// which each row is the per-input rows laid end to end.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,             // concatenation dimension (innermost)
                   useSubtensor); // whether the backend may use sub-tensor views

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
3793
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003794LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
3795 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003796 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3797 bool useSubtensor)
3798{
3799 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
3800}
3801
// Shared driver for the 4D concatenation tests: concatenates three identical
// { 1, 3, 2, 2 } inputs along 'dimension' into the caller-supplied output
// shape. Only 'result.output' is filled in; the caller supplies the expected
// values for its particular dimension. Note the inputs deliberately share
// some values (11/12, 21/22) across tensors.
template <typename T>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
3858
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 0 into a
// { 3, 3, 2, 2 } output: the inputs appear back to back in argument order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Input 0
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        // Input 1
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        // Input 2
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
3894
3895LayerTestResult<float, 4> Concatenation4dDim0Test(
3896 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003897 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003898{
narpra015cdda352018-11-19 15:30:27 +00003899 return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3900}
3901
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 1 into a
// { 1, 9, 2, 2 } output: with a batch of 1 the flattened data is again the
// inputs laid end to end in argument order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Input 0
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        // Input 1
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        // Input 2
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
3938
3939LayerTestResult<float, 4> Concatenation4dDim1Test(
3940 armnn::IWorkloadFactory& workloadFactory,
3941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3942{
3943 return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3944}
3945
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 2 into a
// { 1, 3, 6, 2 } output: per channel, the inputs' height slices interleave.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Channel 0: input0 rows, input1 rows, input2 rows
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        // Channel 1
        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        // Channel 2
        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
3982
3983LayerTestResult<float, 4> Concatenation4dDim2Test(
3984 armnn::IWorkloadFactory& workloadFactory,
3985 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3986{
3987 return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3988}
3989
// Concatenates three { 1, 3, 2, 2 } inputs along dimension 3 (innermost) into
// a { 1, 3, 2, 6 } output: element pairs from each input interleave per row.
// 'useSubtensor' toggles whether the backend may use sub-tensor views.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
                                                              useSubtensor, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
4027
4028LayerTestResult<float, 4> Concatenation4dDim3Test(
4029 armnn::IWorkloadFactory& workloadFactory,
4030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4031 bool useSubtensor)
4032{
4033 return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
4034}
4035
// Concatenates two 4D inputs with different batch counts ({1,3,2,2} and
// {2,3,2,2}) along dimension 0, producing a { 3, 3, 2, 2 } output with the
// inputs stacked batch-wise in argument order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        // Batch 1
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);  // use sub-tensors where the backend supports them

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Input 0
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        // Input 1, Batch 0
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        // Input 1, Batch 1
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
4115
4116LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4117 armnn::IWorkloadFactory& workloadFactory,
4118 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4119{
4120 return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4121}
4122
// Concatenates two differently-shaped 4D tensors ({1,3,2,2} and {1,2,2,2})
// along dimension 1 (channels) and checks the {1,5,2,2} result. qScale and
// qOffset quantize the float reference data (use 0.0f/0 for Float32).
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Axis to concatenate over: 1 == channel dimension.
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has fewer channels (2) but matching N/H/W.
    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    // Output channels = 3 + 2 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // The final 'true' requests sub-tensor views where the backend supports them.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: input0's three channels followed by input1's two.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
4183
4184LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4185 armnn::IWorkloadFactory& workloadFactory,
4186 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4187{
4188 return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4189}
4190
// Concatenates two differently-shaped 4D tensors ({1,3,2,2} and {1,3,3,2})
// along dimension 2 (height) and checks the {1,3,5,2} result. qScale and
// qOffset quantize the float reference data (use 0.0f/0 for Float32).
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // Axis to concatenate over: 2 == height dimension.
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has a larger height (3) but matching N/C/W.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    // Output height = 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // The final 'true' requests sub-tensor views where the backend supports them.
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: per channel, input0's two rows followed by input1's three.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
4262
4263LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4264 armnn::IWorkloadFactory& workloadFactory,
4265 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4266{
4267 return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4268}
4269
// Concatenates two differently-shaped 4D tensors ({1,3,2,2} and {1,3,2,3})
// along dimension 3 (width) and checks the {1,3,2,5} result. qScale/qOffset
// quantize the float reference data (use 0.0f/0 for Float32); useSubtensor
// selects whether the backend should use sub-tensor views.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    // Axis to concatenate over: 3 == width dimension.
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    // Second input has a larger width (3) but matching N/C/H.
    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    // Output width = 2 + 3 = 5.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    // Expected output: each row is input0's two values followed by input1's three.
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
4330
4331LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4332 armnn::IWorkloadFactory& workloadFactory,
4333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4334 bool useSubtensor)
4335{
4336 return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004337}
4338
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004339LayerTestResult<float, 4> ResizeBilinearNopTest(
4340 armnn::IWorkloadFactory& workloadFactory,
4341 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004342 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004343{
Nina Drozdd41b2592018-11-19 13:03:36 +00004344 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4345 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004346
James Conroy6b965822018-11-01 11:33:09 +00004347 std::vector<float> inputData({
4348 1.0f, 2.0f, 3.0f, 4.0f,
4349 2.0f, 3.0f, 4.0f, 5.0f,
4350 3.0f, 4.0f, 5.0f, 6.0f,
4351 4.0f, 5.0f, 6.0f, 7.0f,
4352
telsoa014fcda012018-03-09 14:13:49 +00004353 1.0f, 2.0f, 3.0f, 4.0f,
4354 2.0f, 3.0f, 4.0f, 5.0f,
4355 3.0f, 4.0f, 5.0f, 6.0f,
4356 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004357 });
4358
4359 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004360 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004361 {
4362 std::vector<float> tmp(inputData.size());
4363 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4364 inputData = tmp;
4365 }
4366
4367 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004368
4369 LayerTestResult<float, 4> result(outputTensorInfo);
4370 result.outputExpected = input;
4371
4372 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4373 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4374
4375 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004376 descriptor.m_Parameters.m_DataLayout = dataLayout;
4377 armnn::WorkloadInfo info;
4378 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4379 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4380
4381 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4382
4383 inputHandle->Allocate();
4384 outputHandle->Allocate();
4385 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4386
James Conroy074f3712018-10-03 09:32:03 +01004387 workload->Execute();
4388
4389 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4390 return result;
4391}
4392
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004393LayerTestResult<float, 4> SimpleResizeBilinearTest(
4394 armnn::IWorkloadFactory& workloadFactory,
4395 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004396 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01004397{
Nina Drozdd41b2592018-11-19 13:03:36 +00004398 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
4399 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01004400
James Conroy6b965822018-11-01 11:33:09 +00004401 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004402 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00004403 200.0f, 250.0f,
4404
4405 250.0f, 200.0f,
4406 250.0f, 1.0f
4407 });
James Conroy074f3712018-10-03 09:32:03 +01004408
4409 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
4410 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00004411 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
4412 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
4413 // which we would expect if projecting the centre).
4414
4415 std::vector<float> outputData({
4416 1.0f,
4417
4418 250.0f
4419 });
4420
4421 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004422 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004423 {
4424 std::vector<float> tmp(inputData.size());
4425 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4426 inputData = tmp;
4427
4428 std::vector<float> tmp1(outputData.size());
4429 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4430 outputData = tmp1;
4431 }
4432
4433 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4434
James Conroy074f3712018-10-03 09:32:03 +01004435 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004436 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01004437
4438 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4439 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4440
4441 armnn::ResizeBilinearQueueDescriptor descriptor;
4442 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004443 armnn::WorkloadInfo info;
4444 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4445 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4446
4447 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4448
4449 inputHandle->Allocate();
4450 outputHandle->Allocate();
4451 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4452
4453 workload->Execute();
4454
4455 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4456 return result;
4457}
4458
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004459LayerTestResult<float, 4> ResizeBilinearSqMinTest(
4460 armnn::IWorkloadFactory& workloadFactory,
4461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004462 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004463{
Nina Drozdd41b2592018-11-19 13:03:36 +00004464 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4465 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004466
James Conroy6b965822018-11-01 11:33:09 +00004467 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004468 1.0f, 2.0f, 3.0f, 4.0f,
4469 2.0f, 3.0f, 4.0f, 5.0f,
4470 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00004471 4.0f, 5.0f, 6.0f, 7.0f,
4472
4473 7.0f, 6.0f, 5.0f, 4.0f,
4474 6.0f, 5.0f, 4.0f, 3.0f,
4475 5.0f, 4.0f, 3.0f, 2.0f,
4476 4.0f, 3.0f, 2.0f, 1.0f
4477 });
4478
4479 std::vector<float> outputData({
4480 1.0f, 3.0f,
4481 3.0f, 5.0f,
4482
4483 7.0f, 5.0f,
4484 5.0f, 3.0f
4485 });
4486
4487 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004488 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004489 {
4490 std::vector<float> tmp(inputData.size());
4491 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4492 inputData = tmp;
4493
4494 std::vector<float> tmp1(outputData.size());
4495 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4496 outputData = tmp1;
4497 }
4498
4499 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004500
telsoa014fcda012018-03-09 14:13:49 +00004501 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004502 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004503
4504 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4505 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4506
4507 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004508 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004509 armnn::WorkloadInfo info;
4510 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4511 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4512
4513 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4514
4515 inputHandle->Allocate();
4516 outputHandle->Allocate();
4517 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4518
4519 workload->Execute();
4520
4521 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4522 return result;
4523}
4524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004525LayerTestResult<float, 4> ResizeBilinearMinTest(
4526 armnn::IWorkloadFactory& workloadFactory,
4527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004528 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004529{
Nina Drozdd41b2592018-11-19 13:03:36 +00004530 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
4531 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004532
James Conroy6b965822018-11-01 11:33:09 +00004533 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004534 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
4535 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00004536 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
4537
4538 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
4539 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
4540 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
4541 });
4542
4543 std::vector<float> outputData({
4544 1.0f, 2.6666f, 6.00f,
4545 78.5f, 179.3333f, 401.00f,
4546
4547 987.0f, 454.6670f, 203.33f,
4548 48.5f, 22.3333f, 10.00f
4549 });
4550
4551 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004552 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004553 {
4554 std::vector<float> tmp(inputData.size());
4555 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4556 inputData = tmp;
4557
4558 std::vector<float> tmp1(outputData.size());
4559 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4560 outputData = tmp1;
4561 }
4562
4563 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004564
4565 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004566 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004567
4568 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4569 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4570
4571 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004572 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004573 armnn::WorkloadInfo info;
4574 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4575 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4576
4577 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4578
4579 inputHandle->Allocate();
4580 outputHandle->Allocate();
4581 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4582
4583 workload->Execute();
4584
4585 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4586 return result;
4587}
4588
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004589LayerTestResult<float, 4> ResizeBilinearMagTest(
4590 armnn::IWorkloadFactory& workloadFactory,
4591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004592 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004593{
Nina Drozdd41b2592018-11-19 13:03:36 +00004594 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
4595 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004596
James Conroy6b965822018-11-01 11:33:09 +00004597 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004598 1.0f, 2.0f,
4599 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004600 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00004601
James Conroy6b965822018-11-01 11:33:09 +00004602 233.0f, 144.0f,
4603 21.0f, 13.0f,
4604 2.0f, 1.0f
4605 });
4606
4607 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01004608 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
4609 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004610 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
4611
4612 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
4613 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
4614 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
4615 });
4616
4617 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004618 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004619 {
4620 std::vector<float> tmp(inputData.size());
4621 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4622 inputData = tmp;
4623
4624 std::vector<float> tmp1(outputData.size());
4625 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4626 outputData = tmp1;
4627 }
4628
4629 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4630
4631 LayerTestResult<float, 4> result(outputTensorInfo);
4632 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004633
4634 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4635 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4636
4637 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004638 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004639 armnn::WorkloadInfo info;
4640 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4641 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4642
4643 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4644
4645 inputHandle->Allocate();
4646 outputHandle->Allocate();
4647 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4648
4649 workload->Execute();
4650
4651 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4652 return result;
4653}
4654
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004655LayerTestResult<float, 2> FakeQuantizationTest(
4656 armnn::IWorkloadFactory& workloadFactory,
4657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004658{
4659 constexpr unsigned int width = 2;
4660 constexpr unsigned int height = 3;
4661
4662 const armnn::TensorInfo tensorInfo({height, width },
4663 armnn::DataType::Float32);
4664 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4665 -10.0f, -5.0f,
4666 0.0f, 5.0f,
4667 10.0f, 10.0f
4668 }));
4669
4670 LayerTestResult<float, 2> ret(tensorInfo);
4671
4672 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4673
4674 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4675
4676 armnn::FakeQuantizationQueueDescriptor data;
4677 armnn::WorkloadInfo info;
4678
4679 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
4680 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
4681 float min = -10.f;
4682 float max = 10.f;
4683
4684 data.m_Parameters.m_Min = min;
4685 data.m_Parameters.m_Max = max;
4686
4687 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
4688 armnn::FakeQuantizationQueueDescriptor refData = data;
4689 armnn::WorkloadInfo refInfo = info;
4690 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
4691
4692 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
4693
4694 inputHandle->Allocate();
4695 outputHandle->Allocate();
4696
4697 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
4698
4699 workload->Execute();
4700
4701 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
4702
4703 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4704 0.0f, 63.0f,
4705 128.0f, 191.0f,
4706 255.0f, 255.0f
4707 }));
4708 return ret;
4709}
4710
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004711namespace
4712{
4713
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004714LayerTestResult<float, 4> L2NormalizationTestImpl(
4715 armnn::IWorkloadFactory& workloadFactory,
4716 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4717 const armnn::TensorShape& inputOutputTensorShape,
4718 const std::vector<float>& inputValues,
4719 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00004720 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004721{
4722 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4723 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4724
jimfly013aab7c32018-11-12 13:32:08 +00004725 // at this point if we require it permute the input data
4726 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
4727 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004728 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004729 {
4730 std::vector<float> tmp(inputData.size());
4731 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4732 inputData = tmp;
4733 }
4734
4735 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004736
4737 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00004738 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004739 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004740 {
4741 std::vector<float> tmp(expectedOutputData.size());
4742 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
4743 expectedOutputData = tmp;
4744 }
4745 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004746
4747 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4748 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4749
4750 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00004751 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004752 armnn::WorkloadInfo info;
4753
4754 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4755 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4756
4757 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
4758
4759 inputHandle->Allocate();
4760 outputHandle->Allocate();
4761
4762 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4763
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004764 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004765
4766 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4767
4768 return result;
4769}
4770
4771float CalcInvL2Norm(std::initializer_list<float> elements)
4772{
4773 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
4774 [](float acc, float element) { return acc + element * element; });
4775 return 1.0f / sqrtf(reduction);
4776}
4777
4778} // anonymous namespace
4779
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004780template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004781LayerTestResult<T, 2> Pad2dTestCommon(
4782 armnn::IWorkloadFactory& workloadFactory,
4783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4784 float qScale,
4785 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004786{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004787 const armnn::TensorShape inputShape{ 3, 3 };
4788 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004789
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004790 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4791 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004792
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004793 std::vector<T> inputValues(
4794 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004795 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004796 // Height (3) x Width (3)
4797 4, 8, 6,
4798 7, 4, 4,
4799 3, 2, 4
4800 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004801
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004802 std::vector<T> expectedOutputValues(
4803 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004804 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004805 0, 0, 0, 0, 0, 0, 0,
4806 0, 0, 0, 0, 0, 0, 0,
4807 0, 0, 4, 8, 6, 0, 0,
4808 0, 0, 7, 4, 4, 0, 0,
4809 0, 0, 3, 2, 4, 0, 0,
4810 0, 0, 0, 0, 0, 0, 0,
4811 0, 0, 0, 0, 0, 0, 0
4812 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004813
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004814 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004815
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004816 LayerTestResult<T, 2> result(outputTensorInfo);
4817 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004818
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004819 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4820 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004821
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004822 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004823
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004824 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4825 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
4826 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004827
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004828 descriptor.m_Parameters.m_PadList = PadList;
4829 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004830
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004831 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4832 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004833
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004834 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004835
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004836 inputHandle->Allocate();
4837 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004838
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004839 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004840
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004841 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004842
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004843 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004844
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004845 return result;
4846}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004847
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004848template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004849LayerTestResult<T, 3> Pad3dTestCommon(
4850 armnn::IWorkloadFactory& workloadFactory,
4851 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4852 float qScale,
4853 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004854{
4855 const armnn::TensorShape inputShape{ 2, 2, 2 };
4856 const armnn::TensorShape outputShape{ 3, 5, 6 };
4857
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004858 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4859 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004860
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004861 std::vector<T> inputValues(
4862 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004863 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004864 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004865 0, 4,
4866 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004867
4868 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004869 6, 1,
4870 5, 2
4871 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004872
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004873 std::vector<T> expectedOutputValues(
4874 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004875 {
4876
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004877 0, 0, 0, 0, 0, 0,
4878 0, 0, 0, 0, 0, 0,
4879 0, 0, 0, 4, 0, 0,
4880 0, 0, 2, 5, 0, 0,
4881 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004882
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004883 0, 0, 0, 0, 0, 0,
4884 0, 0, 0, 0, 0, 0,
4885 0, 0, 6, 1, 0, 0,
4886 0, 0, 5, 2, 0, 0,
4887 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004888
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004889 0, 0, 0, 0, 0, 0,
4890 0, 0, 0, 0, 0, 0,
4891 0, 0, 0, 0, 0, 0,
4892 0, 0, 0, 0, 0, 0,
4893 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004894
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004895 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004896
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004897 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004898
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004899 LayerTestResult<T, 3> result(outputTensorInfo);
4900 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004901
4902 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4903 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4904
4905 armnn::PadQueueDescriptor descriptor;
4906
4907 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4908 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
4909 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
4910 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
4911
4912 descriptor.m_Parameters.m_PadList = PadList;
4913 armnn::WorkloadInfo info;
4914
4915 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4916 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4917
4918 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
4919
4920 inputHandle->Allocate();
4921 outputHandle->Allocate();
4922
4923 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
4924
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004925 workload->Execute();
4926
4927 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
4928
4929 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004930}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004931
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004932template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004933LayerTestResult<T, 4> Pad4dTestCommon(
4934 armnn::IWorkloadFactory& workloadFactory,
4935 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4936 float qScale,
4937 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004938{
4939 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
4940 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
4941
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004942 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4943 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004944
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004945 std::vector<T> inputValues(
4946 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004947 {
4948 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004949 0, 1,
4950 2, 3,
4951 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004952
4953 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004954 6, 7,
4955 8, 9,
4956 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004957
4958 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004959 12, 13,
4960 14, 15,
4961 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004962
4963 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004964 18, 19,
4965 20, 21,
4966 22, 23
4967 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004968
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004969 std::vector<T> expectedOutputValues(
4970 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004971 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004972 0, 0, 0, 0,
4973 0, 0, 0, 0,
4974 0, 0, 0, 0,
4975 0, 0, 0, 0,
4976 0, 0, 0, 0,
4977 0, 0, 0, 0,
4978 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004979
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004980 0, 0, 0, 0,
4981 0, 0, 0, 0,
4982 0, 0, 0, 0,
4983 0, 0, 0, 0,
4984 0, 0, 0, 0,
4985 0, 0, 0, 0,
4986 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004987
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004988 0, 0, 0, 0,
4989 0, 0, 0, 0,
4990 0, 0, 0, 0,
4991 0, 0, 0, 0,
4992 0, 0, 0, 0,
4993 0, 0, 0, 0,
4994 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004995
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004996 0, 0, 0, 0,
4997 0, 0, 0, 0,
4998 0, 0, 0, 0,
4999 0, 0, 0, 0,
5000 0, 0, 0, 0,
5001 0, 0, 0, 0,
5002 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005003
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005004 0, 0, 0, 0,
5005 0, 0, 0, 0,
5006 0, 0, 0, 0,
5007 0, 0, 0, 0,
5008 0, 0, 0, 0,
5009 0, 0, 0, 0,
5010 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005011
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005012 0, 0, 0, 0,
5013 0, 0, 0, 0,
5014 0, 0, 0, 0,
5015 0, 0, 0, 0,
5016 0, 0, 0, 0,
5017 0, 0, 0, 0,
5018 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005019
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005020 0, 0, 0, 0,
5021 0, 0, 0, 0,
5022 0, 0, 0, 0,
5023 0, 0, 0, 0,
5024 0, 0, 0, 0,
5025 0, 0, 0, 0,
5026 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005027
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005028 0, 0, 0, 0,
5029 0, 0, 0, 0,
5030 0, 0, 0, 0,
5031 0, 0, 1, 0,
5032 0, 2, 3, 0,
5033 0, 4, 5, 0,
5034 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005035
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005036 0, 0, 0, 0,
5037 0, 0, 0, 0,
5038 0, 0, 0, 0,
5039 0, 6, 7, 0,
5040 0, 8, 9, 0,
5041 0, 10, 11, 0,
5042 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005043
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005044 0, 0, 0, 0,
5045 0, 0, 0, 0,
5046 0, 0, 0, 0,
5047 0, 0, 0, 0,
5048 0, 0, 0, 0,
5049 0, 0, 0, 0,
5050 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005051
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005052 0, 0, 0, 0,
5053 0, 0, 0, 0,
5054 0, 0, 0, 0,
5055 0, 0, 0, 0,
5056 0, 0, 0, 0,
5057 0, 0, 0, 0,
5058 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005059
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005060 0, 0, 0, 0,
5061 0, 0, 0, 0,
5062 0, 0, 0, 0,
5063 0, 0, 0, 0,
5064 0, 0, 0, 0,
5065 0, 0, 0, 0,
5066 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005067
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005068 0, 0, 0, 0,
5069 0, 0, 0, 0,
5070 0, 0, 0, 0,
5071 0, 12, 13, 0,
5072 0, 14, 15, 0,
5073 0, 16, 17, 0,
5074 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005075
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005076 0, 0, 0, 0,
5077 0, 0, 0, 0,
5078 0, 0, 0, 0,
5079 0, 18, 19, 0,
5080 0, 20, 21, 0,
5081 0, 22, 23, 0,
5082 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005083
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005084 0, 0, 0, 0,
5085 0, 0, 0, 0,
5086 0, 0, 0, 0,
5087 0, 0, 0, 0,
5088 0, 0, 0, 0,
5089 0, 0, 0, 0,
5090 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005091
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005092 0, 0, 0, 0,
5093 0, 0, 0, 0,
5094 0, 0, 0, 0,
5095 0, 0, 0, 0,
5096 0, 0, 0, 0,
5097 0, 0, 0, 0,
5098 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005099
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005100 0, 0, 0, 0,
5101 0, 0, 0, 0,
5102 0, 0, 0, 0,
5103 0, 0, 0, 0,
5104 0, 0, 0, 0,
5105 0, 0, 0, 0,
5106 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005107
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005108 0, 0, 0, 0,
5109 0, 0, 0, 0,
5110 0, 0, 0, 0,
5111 0, 0, 0, 0,
5112 0, 0, 0, 0,
5113 0, 0, 0, 0,
5114 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005115
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005116 0, 0, 0, 0,
5117 0, 0, 0, 0,
5118 0, 0, 0, 0,
5119 0, 0, 0, 0,
5120 0, 0, 0, 0,
5121 0, 0, 0, 0,
5122 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005123
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005124 0, 0, 0, 0,
5125 0, 0, 0, 0,
5126 0, 0, 0, 0,
5127 0, 0, 0, 0,
5128 0, 0, 0, 0,
5129 0, 0, 0, 0,
5130 0, 0, 0, 0
5131 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005132
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005133 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005134
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005135 LayerTestResult<T, 4> result(outputTensorInfo);
5136 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005137
5138 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5139 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5140
5141 armnn::PadQueueDescriptor descriptor;
5142
5143 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5144 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5145 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
5146 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
5147 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
5148
5149 descriptor.m_Parameters.m_PadList = PadList;
5150 armnn::WorkloadInfo info;
5151
5152 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5153 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5154
5155 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
5156
5157 inputHandle->Allocate();
5158 outputHandle->Allocate();
5159
5160 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5161
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005162 workload->Execute();
5163
5164 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5165
5166 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005167}
5168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005169LayerTestResult<uint8_t, 2> PadUint82dTest(
5170 armnn::IWorkloadFactory& workloadFactory,
5171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005172{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005173 return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005174}
5175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005176LayerTestResult<uint8_t, 3> PadUint83dTest(
5177 armnn::IWorkloadFactory& workloadFactory,
5178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005179{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005180 return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005181}
5182
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005183LayerTestResult<uint8_t, 4> PadUint84dTest(
5184 armnn::IWorkloadFactory& workloadFactory,
5185 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005186{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005187 return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005188}
5189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005190LayerTestResult<float, 2> PadFloat322dTest(
5191 armnn::IWorkloadFactory& workloadFactory,
5192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005193{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005194 return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005195}
5196
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005197LayerTestResult<float, 3> PadFloat323dTest(
5198 armnn::IWorkloadFactory& workloadFactory,
5199 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005200{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005201 return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005202}
5203
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005204LayerTestResult<float, 4> PadFloat324dTest(
5205 armnn::IWorkloadFactory& workloadFactory,
5206 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005207{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005208 return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005209}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005211LayerTestResult<float, 4> L2Normalization1dTest(
5212 armnn::IWorkloadFactory& workloadFactory,
5213 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005214 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005215{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005216 // Width: 1
5217 // Height: 1
5218 // Channels: 10
5219 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005220 unsigned int numberOfBatches = 1;
5221 unsigned int numberOfChannels = 10;
5222 unsigned int height = 1;
5223 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005224
jimfly013aab7c32018-11-12 13:32:08 +00005225
Nina Drozdd41b2592018-11-19 13:03:36 +00005226 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005227 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005228 std::vector<float> inputValues
5229 {
5230 // Batch 0, Channel 0, Height (1) x Width (1)
5231 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005232
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005233 // Batch 0, Channel 1, Height (1) x Width (1)
5234 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005235
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005236 // Batch 0, Channel 2, Height (1) x Width (1)
5237 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005238
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005239 // Batch 0, Channel 3, Height (1) x Width (1)
5240 4.0f,
5241
5242 // Batch 0, Channel 4, Height (1) x Width (1)
5243 5.0f,
5244
5245 // Batch 0, Channel 5, Height (1) x Width (1)
5246 6.0f,
5247
5248 // Batch 0, Channel 6, Height (1) x Width (1)
5249 7.0f,
5250
5251 // Batch 0, Channel 7, Height (1) x Width (1)
5252 8.0f,
5253
5254 // Batch 0, Channel 8, Height (1) x Width (1)
5255 9.0f,
5256
5257 // Batch 0, Channel 9, Height (1) x Width (1)
5258 10.0f
5259 };
telsoa014fcda012018-03-09 14:13:49 +00005260 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005261 std::vector<float> expectedOutputValues
5262 {
5263 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005264 1.0f * approxInvL2Norm,
5265 2.0f * approxInvL2Norm,
5266 3.0f * approxInvL2Norm,
5267 4.0f * approxInvL2Norm,
5268 5.0f * approxInvL2Norm,
5269 6.0f * approxInvL2Norm,
5270 7.0f * approxInvL2Norm,
5271 8.0f * approxInvL2Norm,
5272 9.0f * approxInvL2Norm,
5273 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005274 };
telsoa014fcda012018-03-09 14:13:49 +00005275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005276
5277 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005278 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005279}
5280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005281LayerTestResult<float, 4> L2Normalization2dTest(
5282 armnn::IWorkloadFactory& workloadFactory,
5283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005284 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005285{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005286 // Width: 5
5287 // Height: 1
5288 // Channels: 2
5289 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005290 unsigned int numberOfBatches = 1;
5291 unsigned int numberOfChannels = 2;
5292 unsigned int height = 1;
5293 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005294
Nina Drozdd41b2592018-11-19 13:03:36 +00005295 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005296 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005297 std::vector<float> inputValues
5298 {
5299 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005300 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005301
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005302 // Batch 0, Channel 1, Height (1) x Width (5)
5303 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5304 };
5305 std::vector<float> expectedOutputValues
5306 {
5307 // Batch 0, Channel 0, Height (1) x Width (5)
5308 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5309 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5310 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5311 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005312 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5313
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005314 // Batch 0, Channel 1, Height (1) x Width (5)
5315 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5316 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5317 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5318 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005319 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005320 };
telsoa014fcda012018-03-09 14:13:49 +00005321
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005322 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005323 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005324}
telsoa014fcda012018-03-09 14:13:49 +00005325
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005326LayerTestResult<float, 4> L2Normalization3dTest(
5327 armnn::IWorkloadFactory& workloadFactory,
5328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005329 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005330{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005331 // Width: 3
5332 // Height: 4
5333 // Channels: 2
5334 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005335 unsigned int numberOfBatches = 1;
5336 unsigned int numberOfChannels = 2;
5337 unsigned int height = 4;
5338 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005339
Nina Drozdd41b2592018-11-19 13:03:36 +00005340 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005341 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005342 std::vector<float> inputValues
5343 {
5344 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005345 119.0f, 21.0f, 150.0f,
5346 149.0f, 32.0f, 179.0f,
5347 15.0f, 227.0f, 141.0f,
5348 147.0f, 199.0f, 220.0f,
5349
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005350 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005351 110.0f, 140.0f, 73.0f,
5352 211.0f, 212.0f, 89.0f,
5353 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005354 162.0f, 12.0f, 161.0f
5355 };
5356 std::vector<float> expectedOutputValues
5357 {
5358 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005359 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5360 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5361 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5362 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5363 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5364 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5365 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5366 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5367 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5368 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5369 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
5370 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
5371
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005372 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005373 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5374 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5375 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5376 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5377 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5378 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5379 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5380 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5381 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5382 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5383 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005384 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
5385 };
telsoa014fcda012018-03-09 14:13:49 +00005386
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005387 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005388 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005389}
telsoa014fcda012018-03-09 14:13:49 +00005390
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005391LayerTestResult<float, 4> L2Normalization4dTest(
5392 armnn::IWorkloadFactory& workloadFactory,
5393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005394 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005395{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005396 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005397 // Height: 4
5398 // Channels: 3
5399 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00005400 unsigned int numberOfBatches = 2;
5401 unsigned int numberOfChannels = 3;
5402 unsigned int height = 4;
5403 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005404
Nina Drozdd41b2592018-11-19 13:03:36 +00005405 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005406 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005407 std::vector<float> inputValues
5408 {
5409 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005410 235.0f, 46.0f, 178.0f,
5411 100.0f, 123.0f, 19.0f,
5412 172.0f, 74.0f, 250.0f,
5413 6.0f, 195.0f, 80.0f,
5414
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005415 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005416 113.0f, 95.0f, 202.0f,
5417 77.0f, 114.0f, 71.0f,
5418 122.0f, 246.0f, 166.0f,
5419 82.0f, 28.0f, 37.0f,
5420
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005421 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005422 56.0f, 170.0f, 162.0f,
5423 194.0f, 89.0f, 254.0f,
5424 12.0f, 209.0f, 200.0f,
5425 1.0f, 64.0f, 54.0f,
5426
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005427 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005428 67.0f, 90.0f, 49.0f,
5429 7.0f, 163.0f, 18.0f,
5430 25.0f, 117.0f, 103.0f,
5431 247.0f, 59.0f, 189.0f,
5432
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005433 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005434 239.0f, 104.0f, 199.0f,
5435 17.0f, 124.0f, 153.0f,
5436 222.0f, 217.0f, 75.0f,
5437 32.0f, 126.0f, 21.0f,
5438
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005439 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005440 97.0f, 145.0f, 215.0f,
5441 115.0f, 116.0f, 238.0f,
5442 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005443 92.0f, 125.0f, 88.0f
5444 };
5445 std::vector<float> expectedOutputValues
5446 {
5447 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005448 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5449 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5450 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5451 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5452 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5453 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5454 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5455 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5456 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5457 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5458 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5459 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5460
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005461 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005462 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5463 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5464 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5465 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5466 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5467 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5468 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5469 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5470 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5471 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5472 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5473 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5474
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005475 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005476 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5477 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5478 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5479 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5480 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5481 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5482 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5483 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5484 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5485 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5486 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5487 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5488
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005489 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005490 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5491 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5492 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5493 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5494 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5495 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5496 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5497 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5498 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5499 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5500 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5501 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5502
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005503 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005504 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5505 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5506 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5507 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5508 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5509 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5510 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5511 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5512 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5513 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5514 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5515 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5516
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005517 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005518 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5519 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5520 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5521 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5522 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5523 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5524 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5525 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5526 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5527 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5528 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005529 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
5530 };
telsoa014fcda012018-03-09 14:13:49 +00005531
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005532 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005533 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005534}
5535
// Runs a Constant-layer workload whose fixed output is a 2x3x4x3 (NCHW)
// tensor and checks that executing the workload reproduces that tensor
// exactly (result.outputExpected is set to the input itself).
// qScale/qOffset are applied to both tensor infos only when T is a
// quantized type; memoryManager is accepted for signature uniformity with
// the other layer tests but is not used in this function.
template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A constant layer copies its stored tensor, so output dims equal input dims.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
    })));

    // The constant layer must emit exactly the tensor it stores.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data lives in a CPU-side scoped handle referenced by the descriptor.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    // Output storage must be allocated before the workload writes to it.
    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5630
// Float32 variant of the constant-layer test; scale/offset of (0.0, 0)
// are ignored by ConstantTestImpl because float is not a quantized type.
LayerTestResult<float, 4> ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
}
5637
// QAsymm8 variant of the constant-layer test, using scale 1.0 / offset 0
// so the quantized values equal the rounded float values.
LayerTestResult<uint8_t, 4> ConstantTestUint8(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
}
5644
// Merges (concatenates) a 2-channel and a 1-channel 6x3 QAsymm8 tensor along
// the channel axis into a 3-channel output. Where the backend supports
// sub-tensors, the inputs are created as views directly into the output
// tensor at the window origins below; otherwise they are standalone handles
// and the merger workload performs the copies.
// memoryManager is accepted for signature uniformity with the other layer
// tests but is not used in this function.
LayerTestResult<uint8_t, 3> MergerUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input supplies the output's first two channels.
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input supplies the remaining channel.
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors (3D layout: channels, height, width).
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected result: input1's two channels (1..36) followed by input2's
    // single channel (37..54).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Channel offset 2: input2 is placed after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // With sub-tensor support the input handles alias the output at the
    // window origins, so writing the inputs writes the output in place.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
5779
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005780LayerTestResult<uint8_t, 4> AdditionUint8Test(
5781 armnn::IWorkloadFactory& workloadFactory,
5782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005783{
5784 unsigned int batchSize = 1;
5785 unsigned int channels = 2;
5786 unsigned int height = 2;
5787 unsigned int width = 3;
5788
5789 const float scale = 7.0f;
5790 const int32_t offset = 3;
5791
5792 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
5793 armnn::TensorInfo outputTensorInfo;
5794
5795 const unsigned int shape[] = { batchSize, channels, height, width };
5796 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5797 inputTensorInfo1.SetQuantizationScale(scale);
5798 inputTensorInfo1.SetQuantizationOffset(offset);
5799
5800 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5801 inputTensorInfo2.SetQuantizationScale(scale);
5802 inputTensorInfo2.SetQuantizationOffset(offset);
5803
5804 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5805 outputTensorInfo.SetQuantizationScale(scale);
5806 outputTensorInfo.SetQuantizationOffset(offset);
5807
telsoa01c577f2c2018-08-31 09:22:23 +01005808 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005809 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5810 {
5811 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
5812 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
5813 }));
5814
telsoa01c577f2c2018-08-31 09:22:23 +01005815 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005816 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5817 {
5818 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
5819 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
5820 }));
5821
telsoa01c577f2c2018-08-31 09:22:23 +01005822 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005823 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5824 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
5825 {
5826 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
5827 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
5828 }));
5829
5830 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
5831 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
5832 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5833
5834 armnn::AdditionQueueDescriptor data;
5835 armnn::WorkloadInfo info;
5836 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
5837 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
5838 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5839
5840 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
5841
5842 inputHandle1->Allocate();
5843 inputHandle2->Allocate();
5844 outputHandle->Allocate();
5845
5846 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
5847 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
5848
5849 workload->Execute();
5850
5851 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5852
5853 return result;
5854}
5855
namespace
{
// Shared driver for the quantised multiplication tests below. Builds the two
// input tensors and the expected output from the given shapes/values and
// per-tensor quantisation parameters, runs a Multiplication workload created
// by the factory, and returns actual vs expected results for the caller to
// compare.
// memoryManager is accepted for signature uniformity with the other layer
// tests but is not used in this helper.
LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<uint8_t> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<uint8_t> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<uint8_t> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);

    // Expected output is supplied by the caller; result.output is filled in
    // after the workload executes.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    // Allocate device-side storage before copying the input data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
5919
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005920LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
5921 armnn::IWorkloadFactory& workloadFactory,
5922 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01005923{
5924 unsigned int batchSize = 1;
5925 unsigned int channels = 2;
5926 unsigned int height = 2;
5927 unsigned int width = 3;
5928 const unsigned int shape[] = { batchSize, channels, height, width };
5929
telsoa01c577f2c2018-08-31 09:22:23 +01005930 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005931 std::vector<uint8_t> input0({
5932 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
5933 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
5934 });
5935
telsoa01c577f2c2018-08-31 09:22:23 +01005936 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005937 std::vector<uint8_t> input1({
5938 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
5939 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
5940 });
5941
telsoa01c577f2c2018-08-31 09:22:23 +01005942 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005943 std::vector<uint8_t> output(
5944 {
5945 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
5946 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
5947 });
5948
5949 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005950 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01005951 shape,
5952 input0,
5953 4.0f,
5954 1,
5955 shape,
5956 input1,
5957 3.0f,
5958 -2,
5959 shape,
5960 output,
telsoa01c577f2c2018-08-31 09:22:23 +01005961 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01005962 -5);
5963}
5964
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005965LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
5966 armnn::IWorkloadFactory& workloadFactory,
5967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01005968{
5969 const unsigned int shape0[] = { 1, 2, 2, 3 };
5970 const unsigned int shape1[] = { 1, 1, 1, 1 };
5971
5972 std::vector<uint8_t> input0({
5973 1, 2, 3, 4, 5, 6,
5974 7, 8, 9, 10, 11, 12
5975 });
5976
5977 std::vector<uint8_t> input1({2});
5978
5979 std::vector<uint8_t> output({
5980 2, 4, 6, 8, 10, 12,
5981 14, 16, 18, 20, 22, 24
5982 });
5983
5984 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005985 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01005986 shape0,
5987 input0,
5988 1.0f,
5989 0,
5990 shape1,
5991 input1,
5992 1.0f,
5993 0,
5994 shape0,
5995 output,
5996 1.0f,
5997 0);
5998}
5999
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006000LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
6001 armnn::IWorkloadFactory& workloadFactory,
6002 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006003{
6004 const unsigned int shape0[] = { 1, 2, 2, 3 };
6005 const unsigned int shape1[] = { 1, 1, 1, 3 };
6006
6007 std::vector<uint8_t> input0({
6008 1, 2, 3, 4, 5, 6,
6009 7, 8, 9, 10, 11, 12
6010 });
6011
6012 std::vector<uint8_t> input1({1, 2, 3});
6013
6014 std::vector<uint8_t> output({
6015 1, 4, 9, 4, 10, 18,
6016 7, 16, 27, 10, 22, 36
6017 });
6018
6019 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006020 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01006021 shape0,
6022 input0,
6023 1.0f,
6024 0,
6025 shape1,
6026 input1,
6027 1.0f,
6028 0,
6029 shape0,
6030 output,
6031 1.0f,
6032 0);
6033}
telsoa014fcda012018-03-09 14:13:49 +00006034
namespace
{
// Shared driver for the subtraction tests below. Builds two input tensors and
// the expected output from the given shapes/values/quantisation parameters,
// runs a Subtraction workload created by the factory, and returns actual vs
// expected results for the caller to compare.
// T selects the data type: uint8_t maps to QuantisedAsymm8, anything else to
// Float32. The scale/offset arguments only affect the quantised case.
// memoryManager is accepted for signature uniformity with the other layer
// tests but is not used in this helper.
template <typename T>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // Map T to the corresponding armnn data type.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    // Expected output is supplied by the caller; result.output is filled in
    // after the workload executes.
    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    // Allocate device-side storage before copying the input data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
6103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006104LayerTestResult<uint8_t, 4> SubtractionUint8Test(
6105 armnn::IWorkloadFactory& workloadFactory,
6106 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006107{
6108 const unsigned int shape0[] = { 1, 1, 2, 2 };
6109 const unsigned int shape1[] = { 1, 1, 2, 2 };
6110
6111 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6112 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
6113 std::vector<uint8_t> output({ 3, 3, 5, 5 });
6114
6115 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006116 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006117 shape0, input0, 0.5f, 2,
6118 shape1, input1, 1.0f, 0,
6119 shape0, output, 1.0f, 0);
6120}
6121
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006122LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
6123 armnn::IWorkloadFactory& workloadFactory,
6124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006125{
6126 const unsigned int shape0[] = { 1, 1, 2, 2 };
6127 const unsigned int shape1[] = { 1, 1, 1, 1 };
6128
6129 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
6130 std::vector<uint8_t> input1({ 2 });
6131 std::vector<uint8_t> output({ 5, 6, 7, 8 });
6132
6133 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006134 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006135 shape0, input0, 0.5f, 2,
6136 shape1, input1, 1.0f, 0,
6137 shape0, output, 1.0f, 3);
6138}
6139
// Quantised subtraction broadcasting a 1x1x2x1 tensor over 1x1x2x2.
// All tensors use scale 1 / offset 0, so the raw bytes are the real values.
// NOTE(review): the expected output { 8, 11, 12, 15 } matches subtracting
// { 2, 1 } along the width axis (10-2, 12-1, 14-2, 16-1), even though shape1
// { 1, 1, 2, 1 } has its non-unit dimension on the height axis — confirm this
// is the intended broadcast behaviour of the reference backend.
LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 1, 2, 2 };
    const unsigned int shape1[] = { 1, 1, 2, 1 };

    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
    std::vector<uint8_t> input1({ 2, 1 });
    std::vector<uint8_t> output({ 8, 11, 12, 15 });

    return SubtractionTestHelper(workloadFactory,
                                 memoryManager,
                                 shape0, input0, 1.0f, 0,
                                 shape1, input1, 1.0f, 0,
                                 shape0, output, 1.0f, 0);
}
6157
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006158LayerTestResult<float, 4> SubtractionTest(
6159 armnn::IWorkloadFactory& workloadFactory,
6160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006161{
6162 const unsigned int shape0[] = { 1, 1, 2, 2 };
6163 const unsigned int shape1[] = { 1, 1, 2, 2 };
6164
6165 std::vector<float> input0({ 1, 2, 3, 4 });
6166 std::vector<float> input1({ 1, -1, 0, 2 });
6167 std::vector<float> output({ 0, 3, 3, 2 });
6168
6169 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006170 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006171 shape0, input0, 1.0f, 0,
6172 shape1, input1, 1.0f, 0,
6173 shape0, output, 1.0f, 0);
6174}
6175
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006176LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
6177 armnn::IWorkloadFactory& workloadFactory,
6178 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006179{
6180 const unsigned int shape0[] = { 1, 1, 2, 2 };
6181 const unsigned int shape1[] = { 1, 1, 1, 1 };
6182
6183 std::vector<float> input0({ 1, 2, 3, 4 });
6184 std::vector<float> input1({ 10 });
6185 std::vector<float> output({ -9, -8, -7, -6 });
6186
6187 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006188 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006189 shape0, input0, 1.0f, 0,
6190 shape1, input1, 1.0f, 0,
6191 shape0, output, 1.0f, 0);
6192}
6193
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006194LayerTestResult<float, 4> SubtractionBroadcastTest(
6195 armnn::IWorkloadFactory& workloadFactory,
6196 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006197{
6198 const unsigned int shape0[] = { 1, 1, 2, 2 };
6199 const unsigned int shape1[] = { 1, 1, 1, 2 };
6200
6201 std::vector<float> input0({ 1, 2, 3, 4 });
6202 std::vector<float> input1({ 10, -5 });
6203 std::vector<float> output({ -9, 7, -7, 9 });
6204
6205 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006206 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006207 shape0, input0, 1.0f, 0,
6208 shape1, input1, 1.0f, 0,
6209 shape0, output, 1.0f, 0);
6210}
6211
// "No-op" resize-bilinear test: the output dimensions are identical to the
// 1x1x4x4 input's, so the resize must copy the data through unchanged and
// the expected output is the input itself.
// memoryManager is accepted for signature uniformity with the other layer
// tests but is not used in this function.
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Output dims equal input dims, making the resize a no-op.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input; // Same-size resize must reproduce the input exactly.

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    // Allocate device-side storage before copying the input data in.
    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6265
// ResizeBilinear (uint8): downscale a 2x2 input to 1x1 and check that the
// single output texel comes from the top-left input texel (corner projection).
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    // Halve both spatial dimensions: 2x2 -> 1x1.
    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; arbitrary but matching quantisation parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload, bind the tensor handles, run it, and read back.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6324
// ResizeBilinear (uint8): square 2x downscale, 4x4 -> 2x2. With corner
// projection each output texel samples every other input texel, so the
// expected output is the input subsampled at even rows/columns.
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; arbitrary but matching quantisation parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // Expected: input values at (0,0), (0,2), (2,0), (2,2).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload, bind the tensor handles, run it, and read back.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6381
// ResizeBilinear (uint8): non-square minification, 3x2 -> 2x1, with
// non-integer scale factors (interpolation happens along the width).
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW layout; arbitrary but matching quantisation parameters.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    // Trailing comments give the dequantised values: (q - offset) * scale.
    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload, bind the tensor handles, run it, and read back.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6436
// ResizeBilinear (uint8): magnification along the width (2 -> 5) with
// different quantisation parameters on input and output, so the workload must
// dequantise, interpolate, and requantise.
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Deliberately different input/output quantisation to exercise requantisation.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    // Trailing comments give the dequantised values.
    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Build the workload, bind the tensor handles, run it, and read back.
    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6493
// Batch normalisation, float32, NCHW layout.
// NOTE(review): the expected values assume the mean/variance/beta/gamma that
// BatchNormTestImpl sets up internally — see that helper for the parameters.
LayerTestResult<float, 4> BatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // Float path: quantisation scale/offset of 0.f/0 are ignored.
    return BatchNormTestImpl<float>(workloadFactory, memoryManager,
                                    inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NCHW);
}
6533
// Batch normalisation, float32, NHWC layout: same data as BatchNormTest but
// laid out channels-last.
// NOTE(review): expected values assume the parameters BatchNormTestImpl sets
// up internally — see that helper.
LayerTestResult<float, 4> BatchNormNhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    // Float path: quantisation scale/offset of 0.f/0 are ignored.
    return BatchNormTestImpl<float>(workloadFactory, memoryManager,
                                    inputOutputShape, inputValues, expectedOutputValues,
                                    0.f, 0, armnn::DataLayout::NHWC);
}
6577
// Batch normalisation, uint8, NCHW layout: same data as the float test but
// run through the quantised path (scale 1/20, offset 50).
// NOTE(review): expected values assume the parameters BatchNormTestImpl sets
// up internally — see that helper.
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
                                      inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
}
6617
// Batch normalisation, uint8, NHWC layout: quantised path (scale 1/20,
// offset 50) with channels-last data.
// NOTE(review): expected values assume the parameters BatchNormTestImpl sets
// up internally — see that helper.
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
                                      inputOutputShape, inputValues, expectedOutputValues,
                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
}
6661
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006662LayerTestResult<uint8_t, 4> ConstantUint8Test(
6663 armnn::IWorkloadFactory& workloadFactory,
6664 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006665{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006666 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00006667}
6668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006669LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
6670 armnn::IWorkloadFactory& workloadFactory,
6671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006672{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006673 return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006674}
6675
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006676LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
6677 armnn::IWorkloadFactory& workloadFactory,
6678 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006679{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006680 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006681}
6682
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006683LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
6684 armnn::IWorkloadFactory& workloadFactory,
6685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006686{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006687 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006688}
6689
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006690LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
6691 armnn::IWorkloadFactory& workloadFactory,
6692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006693{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006694 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006695}
6696
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006697LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
6698 armnn::IWorkloadFactory& workloadFactory,
6699 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006700{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006701 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006702}
6703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006704LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
6705 armnn::IWorkloadFactory& workloadFactory,
6706 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006707{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006708 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006709}
6710
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006711LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
6712 armnn::IWorkloadFactory& workloadFactory,
6713 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006714{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006715 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006716}
6717
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006718LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
6719 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6721 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00006722{
narpra015cdda352018-11-19 15:30:27 +00006723 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006724}
6725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006726LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
6727 armnn::IWorkloadFactory& workloadFactory,
6728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006729{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006730 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006731}
6732
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006733LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
6734 armnn::IWorkloadFactory& workloadFactory,
6735 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006736{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006737 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006738}
6739
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006740LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
6741 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6743 bool useSubtensor)
6744{
6745 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
6746}
6747
6748LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
6749 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006751{
narpra015cdda352018-11-19 15:30:27 +00006752 return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6753}
6754
6755LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
6756 armnn::IWorkloadFactory& workloadFactory,
6757 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6758{
6759 return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6760}
6761
6762LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
6763 armnn::IWorkloadFactory& workloadFactory,
6764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6765{
6766 return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6767}
6768
6769LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
6770 armnn::IWorkloadFactory& workloadFactory,
6771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
6772{
6773 return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
6774}
6775
6776LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
6777 armnn::IWorkloadFactory& workloadFactory,
6778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6779{
6780 return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6781}
6782
6783LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
6784 armnn::IWorkloadFactory& workloadFactory,
6785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6786{
6787 return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6788}
6789
6790LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
6791 armnn::IWorkloadFactory& workloadFactory,
6792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6793{
6794 return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6795}
6796
6797LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
6798 armnn::IWorkloadFactory& workloadFactory,
6799 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6800 bool useSubtensor)
6801{
6802 return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006803}
6804
// Float32 max pooling, 2x2 window / 2x2 stride; forwards straight to the
// templated common implementation (no quantisation parameters for float).
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
6812
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006813LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
6814 armnn::IWorkloadFactory& workloadFactory,
6815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6816 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006817{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006818 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
6819 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00006820}
6821
// Float32 max pooling, 3x3 window / 2x4 stride; forwards straight to the
// templated common implementation.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
}
6829
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006830LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
6831 armnn::IWorkloadFactory& workloadFactory,
6832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6833 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006834{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006835 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
6836 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00006837}
6838
// Float32 max pooling in the requested data layout (NCHW or NHWC); forwards
// straight to the templated common implementation.
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
6846
// uint8 max pooling in the requested data layout; uses the common
// implementation's default quantisation parameters.
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
}
6854
// Float32 average pooling in the requested data layout; forwards straight to
// the templated common implementation.
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
6862
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006863LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
6864 armnn::IWorkloadFactory& workloadFactory,
6865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006866 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01006867{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006868 return SimpleAveragePooling2dTestCommon<uint8_t>(
6869 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00006870}
6871
// Float32 average pooling, 3x2 window / 2x2 stride, exercising the
// ignore-padding (exclude-padding) behaviour; forwards to the common
// implementation.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
        workloadFactory, memoryManager, forceNoPadding);
}
6880
// Float32 average pooling over large tensors; forwards straight to the
// templated common implementation.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
}
6887
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006888LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
6889 armnn::IWorkloadFactory& workloadFactory,
6890 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006891{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006892 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00006893}
6894
// Float32 L2 pooling in the requested data layout; forwards straight to the
// templated common implementation.
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
}
6902
// uint8 L2 pooling in the requested data layout; uses the common
// implementation's default quantisation parameters.
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
}
6910
// Float32 L2 pooling, 3x3 window / stride 1; forwards to the common implementation.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
}
6917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006918LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
6919 armnn::IWorkloadFactory& workloadFactory,
6920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006921{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006922 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006923}
6924
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006925LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
6926 armnn::IWorkloadFactory& workloadFactory,
6927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006928{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006929 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006930}
6931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006932LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
6933 armnn::IWorkloadFactory& workloadFactory,
6934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006935{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006936 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006937}
6938
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006939LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
6940 armnn::IWorkloadFactory& workloadFactory,
6941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006942{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006943 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006944}
6945
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006946LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
6947 armnn::IWorkloadFactory& workloadFactory,
6948 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006949{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006950 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006951}
6952
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006953LayerTestResult<float, 4> L2Pooling2dSize7Test(
6954 armnn::IWorkloadFactory& workloadFactory,
6955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006956{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006957 return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006958}
6959
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006960LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
6961 armnn::IWorkloadFactory& workloadFactory,
6962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006963{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006964 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006965}
6966
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006967LayerTestResult<float, 4> L2Pooling2dSize9Test(
6968 armnn::IWorkloadFactory& workloadFactory,
6969 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006970{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006971 return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006972}
6973
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006974LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
6975 armnn::IWorkloadFactory& workloadFactory,
6976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006977{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006978 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006979}
6980
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006981LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
6982 armnn::IWorkloadFactory& workloadFactory,
6983 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006984{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006985 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006986}
6987
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006988LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
6989 armnn::IWorkloadFactory& workloadFactory,
6990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006991{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006992 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006993}
6994
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006995LayerTestResult<float, 4> ComparePooling2dTest(
6996 armnn::IWorkloadFactory& workloadFactory,
6997 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6998 armnn::IWorkloadFactory& refWorkloadFactory,
6999 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007000{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007001 return ComparePooling2dTestCommon<float>(
7002 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00007003}
7004
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007005LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
7006 armnn::IWorkloadFactory& workloadFactory,
7007 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7008 armnn::IWorkloadFactory& refWorkloadFactory,
7009 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00007010{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007011 return ComparePooling2dTestCommon<uint8_t>(
7012 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00007013}
7014
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007015LayerTestResult<float, 2> FullyConnectedLargeTest(
7016 armnn::IWorkloadFactory& workloadFactory,
7017 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7018 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00007019{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007020 return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00007021}
7022
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007023LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
7024 armnn::IWorkloadFactory& workloadFactory,
7025 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007026{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007027 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007028}
7029
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007030LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
7031 armnn::IWorkloadFactory& workloadFactory,
7032 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007033{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007034 return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007035}
7036
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007037LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
7038 armnn::IWorkloadFactory& workloadFactory,
7039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007040{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007041 return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007042}
7043
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007044LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
7045 armnn::IWorkloadFactory& workloadFactory,
7046 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007047{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007048 return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00007049}
7050
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007051LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
7052 armnn::IWorkloadFactory& workloadFactory,
7053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007054{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007055 return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007056}
7057
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007058LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
7059 armnn::IWorkloadFactory& workloadFactory,
7060 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007061{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007062 return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007063}
7064
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007065LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
7066 armnn::IWorkloadFactory& workloadFactory,
7067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007068{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007069 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007070}
7071
7072LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007073 armnn::IWorkloadFactory& workloadFactory,
7074 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007075{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007076 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007077}
7078
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007079LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
7080 armnn::IWorkloadFactory& workloadFactory,
7081 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007082{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007083 return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007084}
7085
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007086LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
7087 armnn::IWorkloadFactory& workloadFactory,
7088 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007089{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007090 return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007091}
7092
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007093LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
7094 armnn::IWorkloadFactory& workloadFactory,
7095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007096{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007097 return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007098}
7099
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007100LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
7101 armnn::IWorkloadFactory& workloadFactory,
7102 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007103{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007104 return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007105}
7106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007107LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
7108 armnn::IWorkloadFactory& workloadFactory,
7109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007110{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007111 return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007112}
7113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007114LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
7115 armnn::IWorkloadFactory& workloadFactory,
7116 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007117{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007118 return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007119}
7120
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007121LayerTestResult<float, 4> SimplePermuteFloat32Test(
7122 armnn::IWorkloadFactory& workloadFactory,
7123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007124{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007125 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007126};
7127
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007128LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
7129 armnn::IWorkloadFactory& workloadFactory,
7130 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00007131{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007132 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00007133};
surmeh01bceff2f2018-03-29 16:29:27 +01007134
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007135LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
7136 armnn::IWorkloadFactory& workloadFactory,
7137 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007138{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007139 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007140};
7141
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007142LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
7143 armnn::IWorkloadFactory& workloadFactory,
7144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007145{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007146 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01007147};
7148
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007149LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
7150 armnn::IWorkloadFactory& workloadFactory,
7151 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007152{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007153 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01007154};
7155
namespace
{

// Builds, runs, and checks a single Mean workload.
//
// T         - element type; uint8_t selects QuantisedAsymm8, anything else Float32 (see dataType below).
// InputDim  - rank of the input tensor.
// OutputDim - rank of the expected output tensor.
//
// inputShape/outputShape - raw shape arrays of length InputDim/OutputDim.
// inputData/outputData   - flattened input values and expected output values.
// axis/keepDims          - forwarded into MeanDescriptor (m_Axis / m_KeepDims).
// scale/offset           - quantization parameters applied to BOTH input and output tensor infos.
//
// Returns a LayerTestResult holding the actual output (read back from the output handle)
// and the expected output, for comparison by the caller.
//
// NOTE(review): memoryManager is accepted but never used in this body - presumably kept for
// signature consistency with the other test helpers; confirm whether the workload should be
// executed via a memory-manager-aware helper instead.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is derived from the template parameter: uint8_t means quantized, otherwise float.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // The same quantization parameters are used for input and output.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // result.output is filled after execution; outputExpected holds the reference values.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and wire up the tensor handles.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Handles must be allocated before any data is copied in or out.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    // Read the computed output back into the result for comparison by the caller.
    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
7213
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007214LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
7215 armnn::IWorkloadFactory& workloadFactory,
7216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007217{
7218 const unsigned int inputShape[] = { 3, 2 };
7219 const unsigned int outputShape[] = { 1 };
7220
7221 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7222 std::vector<uint8_t> output({ 2 });
7223
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007224 return MeanTestHelper<uint8_t, 2, 1>(
7225 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007226}
7227
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007228LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
7229 armnn::IWorkloadFactory& workloadFactory,
7230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007231{
7232 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7233 const unsigned int outputShape[] = { 1, 1, 2 };
7234
7235 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7236 std::vector<uint8_t> output({ 2, 2 });
7237
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007238 return MeanTestHelper<uint8_t, 4, 3>(
7239 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007240}
7241
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007242LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
7243 armnn::IWorkloadFactory& workloadFactory,
7244 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007245{
7246 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7247 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7248
7249 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7250 std::vector<uint8_t> output({ 2, 2 });
7251
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007252 return MeanTestHelper<uint8_t, 4, 4>(
7253 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007254}
7255
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007256LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
7257 armnn::IWorkloadFactory& workloadFactory,
7258 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007259{
7260 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7261 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7262
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007263 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01007264 std::vector<uint8_t> output({ 1, 3, 5 });
7265
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007266 return MeanTestHelper<uint8_t, 4, 4>(
7267 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007268}
7269
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007270LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
7271 armnn::IWorkloadFactory& workloadFactory,
7272 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007273{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007274 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007275 const unsigned int outputShape[] = { 2 };
7276
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007277 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
7278 24 });
7279 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01007280
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007281 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
7282 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007283 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01007284}
7285
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007286LayerTestResult<float, 1> MeanFloatSimpleTest(
7287 armnn::IWorkloadFactory& workloadFactory,
7288 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007289{
7290 const unsigned int inputShape[] = { 3, 2 };
7291 const unsigned int outputShape[] = { 1 };
7292
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007293 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7294 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007295
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007296 return MeanTestHelper<float, 2, 1>(
7297 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007298}
7299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007300LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
7301 armnn::IWorkloadFactory& workloadFactory,
7302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007303{
7304 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7305 const unsigned int outputShape[] = { 3, 1, 2 };
7306
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007307 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
7308 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007310 return MeanTestHelper<float, 4, 3>(
7311 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007312}
7313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007314LayerTestResult<float, 4> MeanFloatKeepDimsTest(
7315 armnn::IWorkloadFactory& workloadFactory,
7316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007317{
7318 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7319 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7320
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007321 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7322 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007323
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007324 return MeanTestHelper<float, 4, 4>(
7325 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007326}
7327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007328LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
7329 armnn::IWorkloadFactory& workloadFactory,
7330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007331{
7332 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7333 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7334
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007335 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
7336 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01007337
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007338 return MeanTestHelper<float, 4, 4>(
7339 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007340}
7341
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007342LayerTestResult<float, 1> MeanVtsFloat1Test(
7343 armnn::IWorkloadFactory& workloadFactory,
7344 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007345{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007346 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007347 const unsigned int outputShape[] = { 2 };
7348
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007349 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
7350 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
7351 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007352
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007353 return MeanTestHelper<float, 3, 1>(
7354 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007355}
7356
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007357LayerTestResult<float, 3> MeanVtsFloat2Test(
7358 armnn::IWorkloadFactory& workloadFactory,
7359 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007360{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007361 const unsigned int inputShape[] = { 4, 3, 2 };
7362 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01007363
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007364 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
7365 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
7366 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01007367
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007368 return MeanTestHelper<float, 3, 3>(
7369 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007370}
7371
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007372LayerTestResult<float, 3> MeanVtsFloat3Test(
7373 armnn::IWorkloadFactory& workloadFactory,
7374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007375{
7376 const unsigned int inputShape[] = { 1, 2, 2, 1 };
7377 const unsigned int outputShape[] = { 1, 2, 1 };
7378
7379 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
7380 std::vector<float> output({ 1.5f, 3.5f });
7381
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007382 return MeanTestHelper<float, 4, 3>(
7383 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007384}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007385
// Runs a MaxPool workload followed by an Addition workload, feeding the pooling output
// handle directly into the addition, and checks the final result against a hand-computed
// expectation. Exercises chaining two workloads through a shared tensor handle.
//
// NOTE(review): memoryManager is accepted but never used in this body - presumably kept
// for signature consistency with the other tests in this file.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                            {1, 2, 3,
                                                             4, 5, 6,
                                                             7, 8, 9
                                                            });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    // Scratch buffer sized like the pooling output; see NOTE(review) below on how it is used.
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                    {12, 16,
                                                     24, 28,
                                                    });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    // The addition reads poolingOutputHandle directly, so the two workloads share that handle.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    // All handles must be allocated before any copy or execution touches them.
    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    // NOTE(review): the next two copies round-trip poolingOutputHandle through resultMaxPool
    // BEFORE either workload has executed, so they appear to transfer indeterminate data and
    // have no effect on the final result (the MaxPool writes poolingOutputHandle during
    // Execute() below, and the addition reads the handle directly). Looks like dead code -
    // confirm before removing.
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Run MaxPool first, then the Addition that consumes its output handle.
    workload->Execute();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007488
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007489LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
7490 armnn::IWorkloadFactory& workloadFactory,
7491 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007492{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007493 return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007494}
7495
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007496LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
7497 armnn::IWorkloadFactory& workloadFactory,
7498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007499{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007500 return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007501}
7502
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007503LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
7504 armnn::IWorkloadFactory& workloadFactory,
7505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007506{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007507 return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007508}
7509
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007510LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
7511 armnn::IWorkloadFactory& workloadFactory,
7512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007513{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007514 return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007515}
7516
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007517LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
7518 armnn::IWorkloadFactory& workloadFactory,
7519 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007520{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007521 return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007522}
7523
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007524LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
7525 armnn::IWorkloadFactory& workloadFactory,
7526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007527{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007528 return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007529}
7530
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007531LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
7532 armnn::IWorkloadFactory& workloadFactory,
7533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007534{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007535 return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007536}
7537
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007538LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
7539 armnn::IWorkloadFactory& workloadFactory,
7540 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007541{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007542 return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007543}
7544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007545LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
7546 armnn::IWorkloadFactory& workloadFactory,
7547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007548{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007549 return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007550}
7551
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007552LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
7553 armnn::IWorkloadFactory& workloadFactory,
7554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007555{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007556 return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007557}
7558
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007559LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
7560 armnn::IWorkloadFactory& workloadFactory,
7561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007562{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007563 return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007564}
7565
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007566LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
7567 armnn::IWorkloadFactory& workloadFactory,
7568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007569{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007570 return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007571}
7572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007573LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
7574 armnn::IWorkloadFactory& workloadFactory,
7575 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007576{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007577 return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007578}
7579
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007580LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
7581 armnn::IWorkloadFactory& workloadFactory,
7582 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007583{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007584 return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007585}
7586
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007587LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
7588 armnn::IWorkloadFactory& workloadFactory,
7589 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007590{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007591 return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007592}
7593
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007594LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
7595 armnn::IWorkloadFactory& workloadFactory,
7596 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007597{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007598 return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007599}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007600
7601namespace {
7602
7603template<typename T, std::size_t InputDim, std::size_t OutputDim>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007604LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
7605 armnn::IWorkloadFactory &workloadFactory,
7606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
7607 const armnn::DataLayout& dataLayout,
7608 const unsigned int *inputShape,
7609 const std::vector<T> &inputData,
7610 const std::vector<unsigned int> &blockShape,
7611 const std::vector<std::pair<unsigned int, unsigned int>> &crops,
7612 const unsigned int *outputShape,
7613 const std::vector<T> &outputData,
7614 float scale = 1.0f,
7615 int32_t offset = 0)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007616 {
7617 auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
7618
7619 armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
7620 armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
7621
7622 inputTensorInfo.SetQuantizationScale(scale);
7623 inputTensorInfo.SetQuantizationOffset(offset);
7624
7625 outputTensorInfo.SetQuantizationScale(scale);
7626 outputTensorInfo.SetQuantizationOffset(offset);
7627
7628 auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
7629
7630 LayerTestResult<T, OutputDim> result(outputTensorInfo);
7631 result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
7632
7633 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
7634 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
7635
7636 armnn::BatchToSpaceNdQueueDescriptor data;
7637 data.m_Parameters.m_DataLayout = dataLayout;
7638 data.m_Parameters.m_BlockShape = blockShape;
7639 data.m_Parameters.m_Crops = crops;
7640 armnn::WorkloadInfo info;
7641 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
7642 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
7643
7644 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);
7645
7646 inputHandle->Allocate();
7647 outputHandle->Allocate();
7648
7649 CopyDataToITensorHandle(inputHandle.get(), input.origin());
7650
7651 workload->Execute();
7652
7653 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
7654
7655 return result;
7656}
7657
7658} // anonymous namespace
7659
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007660LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
7661 armnn::IWorkloadFactory& workloadFactory,
7662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007663{
7664 const unsigned int inputShape[] = {4, 2, 2, 1};
7665 const unsigned int outputShape[] = {1, 4, 4, 1 };
7666
7667 std::vector<float> input
7668 ({
7669 // Batch 0, Height 0, Width (2) x Channel (1)
7670 1.0f, 3.0f,
7671 // Batch 0, Height 1, Width (2) x Channel (1)
7672 9.0f, 11.0f,
7673
7674
7675 // Batch 1, Height 0, Width (2) x Channel (1)
7676 2.0f, 4.0f,
7677 // Batch 1, Height 1, Width (2) x Channel (1)
7678 10.0f, 12.0f,
7679
7680
7681 // Batch 2, Height 0, Width (2) x Channel (1)
7682 5.0f, 7.0f,
7683 // Batch 2, Height 1, Width (2) x Channel (1)
7684 13.0f, 15.0f,
7685
7686 // Batch 3, Height 0, Width (2) x Channel (3)
7687 6.0f, 8.0f,
7688 // Batch 3, Height 1, Width (2) x Channel (1)
7689 14.0f, 16.0f
7690 });
7691
7692 std::vector<float> expectedOutput
7693 ({
7694 1.0f, 2.0f, 3.0f, 4.0f,
7695 5.0f, 6.0f, 7.0f, 8.0f,
7696 9.0f, 10.0f, 11.0f, 12.0f,
7697 13.0f, 14.0f, 15.0f, 16.0f
7698 });
7699
7700 std::vector<unsigned int> blockShape {2, 2};
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007701 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007702
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007703 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7704 armnn::DataLayout::NHWC, inputShape, input, blockShape,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007705 crops, outputShape, expectedOutput);
7706}
7707
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007708LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
7709 armnn::IWorkloadFactory& workloadFactory,
7710 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007711{
7712 const unsigned int inputShape[] = {4, 1, 1, 1};
7713 const unsigned int outputShape[] = {1, 2, 2, 1};
7714
7715 std::vector<float> input
7716 ({
7717 // Batch 0, Height 0, Width (2) x Channel (1)
7718 1.0f, 2.0f, 3.0f, 4.0f
7719 });
7720
7721 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
7722
7723 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007724 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007726 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7727 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7728 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007729}
7730
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007731LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
7732 armnn::IWorkloadFactory& workloadFactory,
7733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007734{
7735 const unsigned int inputShape[] = {4, 1, 1, 3};
7736 const unsigned int outputShape[] = {1, 2, 2, 3};
7737
7738 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7739
7740 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7741
7742 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007743 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007744
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007745 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7746 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7747 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007748}
7749
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007750LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
7751 armnn::IWorkloadFactory &workloadFactory,
7752 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007753{
7754 const unsigned int inputShape[] = {4, 3, 1, 1};
7755 const unsigned int outputShape[] = {1, 3, 2, 2};
7756
7757 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7758
7759 std::vector<float> expectedOutput
7760 ({
7761 // Batch 0, Channel 0, Height (2) x Width (2)
7762 1.0f, 4.0f,
7763 7.0f, 10.0f,
7764
7765 // Batch 0, Channel 1, Height (2) x Width (2)
7766 2.0f, 5.0f,
7767 8.0f, 11.0f,
7768
7769 // Batch 0, Channel 2, Height (2) x Width (2)
7770 3.0f, 6.0f,
7771 9.0f, 12.0f,
7772 });
7773
7774 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007775 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007776
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007777 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7778 armnn::DataLayout::NCHW, inputShape, input, blockShape,
7779 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007780}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007781
Mike Kelly831faed2018-11-28 11:52:08 +00007782LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
7783 armnn::IWorkloadFactory& workloadFactory,
7784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7785{
7786 const unsigned int inputShape[] = {4, 1, 1, 1};
7787 const unsigned int outputShape[] = {1, 1, 2, 2};
7788
7789 std::vector<float> input
7790 ({
7791 // Batch 0, Height 0, Width (2) x Channel (1)
7792 1.0f, 2.0f, 3.0f, 4.0f
7793 });
7794
7795 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
7796
7797 std::vector<unsigned int> blockShape({2, 2});
7798 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7799
7800 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7801 armnn::DataLayout::NCHW, inputShape, input, blockShape,
7802 crops, outputShape, expectedOutput);
7803}
7804
7805LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
7806 armnn::IWorkloadFactory& workloadFactory,
7807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7808{
7809 const unsigned int inputShape[] = {4, 3, 1, 1};
7810 const unsigned int outputShape[] = {1, 3, 2, 2};
7811
7812 std::vector<float> input({ 1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f });
7813
7814 std::vector<float> expectedOutput
7815 ({
7816 // Batch 0, Channel 0, Height (2) x Width (2)
7817 1.0f, 7.0f,
7818 2.0f, 8.0f,
7819
7820 // Batch 0, Channel 1, Height (2) x Width (2)
7821 3.0f, 9.0f,
7822 4.0f, 10.0f,
7823
7824 // Batch 0, Channel 2, Height (2) x Width (2)
7825 5.0f, 11.0f,
7826 6.0f, 12.0f,
7827 });
7828
7829 std::vector<unsigned int> blockShape({2, 2});
7830 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7831
7832 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7833 armnn::DataLayout::NCHW, inputShape, input, blockShape,
7834 crops, outputShape, expectedOutput);
7835}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007836
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007837LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
7838 armnn::IWorkloadFactory& workloadFactory,
7839 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007840{
7841 const unsigned int inputShape[] = {4, 2, 2, 1};
7842 const unsigned int outputShape[] = {1, 4, 4, 1};
7843
7844 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 });
7845 std::vector<uint8_t> expectedOutput({ 1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
7846
7847 std::vector<unsigned int> blockShape({2, 2});
7848 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7849
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00007850 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
7851 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007852}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00007853
// Thin forwarders that instantiate the templated StridedSlice test
// implementations (StridedSliceTestImpl.hpp) for float data. The result rank
// (4/3/2) mirrors the slice configuration of each underlying test.

LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
}
7916
// uint8_t forwarders for the same StridedSlice test bodies, exercising the
// quantized path.

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
}
Mike Kelly831faed2018-11-28 11:52:08 +00007979LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
7980 armnn::IWorkloadFactory& workloadFactory,
7981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7982{
7983 const unsigned int inputShape[] = {4, 1, 1, 1};
7984 const unsigned int outputShape[] = {1, 2, 2, 1};
7985
7986 std::vector<uint8_t> input
7987 ({
7988 // Batch 0, Height 0, Width (2) x Channel (1)
7989 1, 2, 3, 4
7990 });
7991
7992 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
7993
7994 std::vector<unsigned int> blockShape({2, 2});
7995 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7996
7997 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
7998 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7999 crops, outputShape, expectedOutput);
8000}
8001
8002LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
8003 armnn::IWorkloadFactory& workloadFactory,
8004 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8005{
8006 const unsigned int inputShape[] = {4, 1, 1, 3};
8007 const unsigned int outputShape[] = {1, 2, 2, 3};
8008
8009 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8010
8011 std::vector<uint8_t> expectedOutput({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8012
8013 std::vector<unsigned int> blockShape({2, 2});
8014 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8015
8016 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8017 armnn::DataLayout::NHWC, inputShape, input, blockShape,
8018 crops, outputShape, expectedOutput);
8019}
8020
8021
8022LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
8023 armnn::IWorkloadFactory &workloadFactory,
8024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8025{
8026 const unsigned int inputShape[] = {4, 3, 1, 1};
8027 const unsigned int outputShape[] = {1, 3, 2, 2};
8028
8029 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
8030
8031 std::vector<uint8_t> expectedOutput
8032 ({
8033 // Batch 0, Channel 0, Height (2) x Width (2)
8034 1, 4,
8035 7, 10,
8036
8037 // Batch 0, Channel 1, Height (2) x Width (2)
8038 2, 5,
8039 8, 11,
8040
8041 // Batch 0, Channel 2, Height (2) x Width (2)
8042 3, 6,
8043 9, 12,
8044 });
8045
8046 std::vector<unsigned int> blockShape({2, 2});
8047 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8048
8049 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8050 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8051 crops, outputShape, expectedOutput);
8052}
8053
8054LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
8055 armnn::IWorkloadFactory& workloadFactory,
8056 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8057{
8058 const unsigned int inputShape[] = {4, 1, 1, 1};
8059 const unsigned int outputShape[] = {1, 1, 2, 2};
8060
8061 std::vector<uint8_t> input
8062 ({
8063 // Batch 0, Height 0, Width (2) x Channel (1)
8064 1, 2, 3, 4
8065 });
8066
8067 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
8068
8069 std::vector<unsigned int> blockShape({2, 2});
8070 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8071
8072 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8073 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8074 crops, outputShape, expectedOutput);
8075}
8076
8077LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
8078 armnn::IWorkloadFactory& workloadFactory,
8079 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8080{
8081 const unsigned int inputShape[] = {4, 3, 1, 1};
8082 const unsigned int outputShape[] = {1, 3, 2, 2};
8083
8084 std::vector<uint8_t> input({ 1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12 });
8085
8086 std::vector<uint8_t> expectedOutput
8087 ({
8088 // Batch 0, Channel 0, Height (2) x Width (2)
8089 1, 7,
8090 2, 8,
8091
8092 // Batch 0, Channel 1, Height (2) x Width (2)
8093 3, 9,
8094 4, 10,
8095
8096 // Batch 0, Channel 2, Height (2) x Width (2)
8097 5, 11,
8098 6, 12,
8099 });
8100 std::vector<unsigned int> blockShape({2, 2});
8101 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
8102
8103 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
8104 armnn::DataLayout::NCHW, inputShape, input, blockShape,
8105 crops, outputShape, expectedOutput);
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00008106}
8107
// Thin forwarders that instantiate the templated Debug layer tests for each
// supported tensor rank (4D..1D) and element type.

LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<float>(workloadFactory, memoryManager);
}

// uint8_t variants exercise the quantized path of the same test bodies.

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
}