blob: b44c835cb274c98436b005e793a26ad04380ffce [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
9#include "test/TensorHelpers.hpp"
10#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010011#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000012
13#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010014#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000015
David Beck711fa312018-09-24 10:46:38 +010016#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000017
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000018#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000019#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000020#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000021
Éanna Ó Catháinde705582018-12-03 13:04:22 +000022#include <reference/workloads/RefWorkloads.hpp>
23
telsoa014fcda012018-03-09 14:13:49 +000024#include <algorithm>
25#include <boost/cast.hpp>
26
27#include "WorkloadTestUtils.hpp"
28#include "Conv2dTestImpl.hpp"
29#include "BatchNormTestImpl.hpp"
30#include "ActivationTestImpl.hpp"
31#include "Pooling2dTestImpl.hpp"
32#include "ReshapeTestImpl.hpp"
33#include "FullyConnectedTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000034#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000035#include "SplitterTestImpl.hpp"
36#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000037#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000038#include "NormTestImpl.hpp"
39#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010040#include "LstmTestImpl.hpp"
41#include "ConvertFp16ToFp32TestImpl.hpp"
42#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000043#include "DebugTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000044
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Data is laid out channel-major (CHW): channel 0 is mostly 0.5f with one all-zero
// row, channel 1 has a single vertical line of 1s at column 2, channel 2 is all -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
72
// 2-channel bias used by a number of Conv2d tests: one bias value per output channel.
static std::vector<float> Bias2({0, 2});
75
telsoa01c577f2c2018-08-31 09:22:23 +010076// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
telsoa014fcda012018-03-09 14:13:49 +000077template<typename T>
78boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
79{
80 if(biasEnabled)
81 {
82 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
83 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
84 return bias;
85 }
86 else
87 {
88 return boost::multi_array<T, 1>();
89 }
90}
91
// Runs a Conv2d over the shared 1-batch 3-channel 16x8 input using a 2-element
// batch of 3-channel 3x5 kernels and compares against a hand-computed expected
// output. qScale/qOffset quantize input, kernel and expected output for integer
// element types T; biasEnabled selects Bias2 or no bias; layout (NCHW/NHWC) is
// forwarded to the implementation.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels. Each group of five
    // 3-value rows below is one channel of one kernel.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // The bias type differs for integer inputs (e.g. int32 for uint8 data),
    // hence the FullyConnectedBiasTypeForInputType mapping.
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
174
// Runs a Conv2d over the shared 1-batch 3-channel 16x8 input using a 2-element
// batch of 3-channel 3x3 kernels and compares against a hand-computed expected
// output. qScale/qOffset quantize input, kernel and expected output for integer
// element types T; biasEnabled selects Bias2 or no bias; layout (NCHW/NHWC) is
// forwarded to the implementation.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels. Each group of three
    // 3-value rows below is one channel of one kernel.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // The bias type differs for integer inputs (e.g. int32 for uint8 data),
    // hence the FullyConnectedBiasTypeForInputType mapping.
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout);
}
249
// Runs a Conv2d over a single-batch single-channel 3x4 image with one 3x3 kernel,
// using the NHWC-aware test implementation, and compares against a hand-computed
// output of the same 3x4 size. No bias is applied (an empty bias tensor is passed).
// NOTE(review): qScale/qOffset are forwarded to the impl but the input/kernel/output
// data here are NOT pre-quantized (MakeTensor is used on the raw values) — confirm
// the impl handles quantization itself for integer T, or that this is float-only.
template<typename T>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Single-batch single-channel 3x4 input, NHWC shape {1, 3, 4, 1}.
    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3,
            8, 7, 3, 6,
            3, 3, 9, 1
        });


    // One single-channel 3x3 kernel, NHWC shape {1, 3, 3, 1}.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
            4, 5, 6,
            0, 0, 0,
            3, 2, 1
        });

    // Expected output is 1 batch of a single-channel 3x4 image — same spatial
    // size as the input (padding/stride are chosen inside the impl; not visible here).
    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());

    const std::vector<float> outputData =
        {
            23, 41, 33, 21,
            44, 65, 76, 52,
            82, 85, 79, 42
        };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
                                              memoryManager,
                                              input,
                                              kernel,
                                              boost::multi_array<T, 1>(),
                                              expectedOutput,
                                              dataLayout,
                                              qScale,
                                              qOffset);
}
300
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000301LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
302 armnn::IWorkloadFactory& workloadFactory,
303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
304 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000305 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000306{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000307 return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000308}
309
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000310LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
313 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000314 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000315{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000316 return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000317}
318
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000319LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
320 armnn::IWorkloadFactory& workloadFactory,
321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
322 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000323 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000325 return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000326}
327
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000328LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
329 armnn::IWorkloadFactory& workloadFactory,
330 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
331 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100332{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000333 return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
334 memoryManager,
335 0.f,
336 0,
337 biasEnabled,
338 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100339}
340
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000341LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
342 armnn::IWorkloadFactory& workloadFactory,
343 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
344 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000345 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000346{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000347 return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000348}
349
// Runs a Conv2d on a 1x1x3x3 input with a 1x1x2x2 kernel and asymmetric padding
// that is larger than half the kernel size on the right/bottom (left=1, top=2,
// right=3, bottom=4). No bias is used. Checks against a manually calculated
// expected output (the derivation is spelled out below).
template<typename T>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 8x6 (HxW) image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        })));

    // Bias is disabled (first GetBias2 argument is false).
    return SimpleConvolution2dTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout,
      1,  // Padding left.
      2,  // Padding top.
      3,  // Padding right.
      4); // Padding bottom.
}
411
412template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000413LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
414 armnn::IWorkloadFactory& workloadFactory,
415 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000416 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000417 float qScale,
418 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000419{
telsoa01c577f2c2018-08-31 09:22:23 +0100420 // Use a single-batch 1-channel 5x5 image as input.
telsoa014fcda012018-03-09 14:13:49 +0000421 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
422 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
423 QuantizedVector<T>(qScale, qOffset, {
424 11,21,31,41,51,
425 12,22,32,42,52,
426 13,23,33,43,53,
427 14,24,34,44,54,
428 15,25,35,45,55,
429 })));
430
telsoa01c577f2c2018-08-31 09:22:23 +0100431 // Use 1 batch of a 1-channel 4x4 kernel.
telsoa014fcda012018-03-09 14:13:49 +0000432 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
433 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
434 QuantizedVector<T>(qScale, qOffset, {
435 -11,-21,-31,-41,
436 -12,-22,-32,-42,
437 -13,-23,-33,-43,
438 -14,-24,-34,-44,
439 })));
440
telsoa01c577f2c2018-08-31 09:22:23 +0100441 // Expected output is 1 batch of a 1-channel 5x5 image.
telsoa014fcda012018-03-09 14:13:49 +0000442 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
443 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
444 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
445 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000446 -7140, -10580, -13940, -9300, -5230,
447 -9590, -14120, -18520, -12290, -6860,
448 -9980, -14560, -18960, -12560, -7000,
449 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100450 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000451 })));
452
453 return SimpleConvolution2dTestImpl<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000454 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000455 input,
456 kernel,
457 GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
458 expectedOutput,
459 qScale,
460 qOffset,
narpra015f703182018-10-26 16:24:58 +0100461 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100462 1, // Padding left.
463 1, // Padding top.
464 2, // Padding right.
465 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100466}
467
// Runs a depthwise Conv2d (depth multiplier 1) on a 1x2x5x5 input with a 2-channel
// 4x4 kernel, asymmetric padding (left=1, top=1, right=2, bottom=2) and stride 1x1.
// The expected output was calculated with the python tensorflow library.
// NOTE(review): input/kernel/output data are quantized using each TensorInfo's own
// quantization scale/offset (no scale/offset is set on them here), while the
// qScale/qOffset arguments are only forwarded to GetBias2 and the impl — confirm
// this asymmetry is intentional.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
             0,  1,  2,  3,  4,
             5,  6,  7,  8,  9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10,  9,
             8,  7,  6,  5,
             4,  3,  2,  1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      layout,
      1,  // Padding left.
      1,  // Padding top.
      2,  // Padding right.
      2,  // Padding bottom.
      1,  // strideX
      1); // strideY
}
542
// NHWC variant of the asymmetric depthwise Conv2d test: same 5x5 2-channel data
// as DepthwiseConvolution2dAsymmetricTestCommon but laid out channels-last
// ({1, 5, 5, 2}), with padding (left=1, top=1, right=2, bottom=2) and stride 1x1.
// NOTE(review): as in the NCHW variant, tensor data is quantized with each
// TensorInfo's own quantization params while qScale/qOffset are only forwarded
// to GetBias2 and the impl — confirm this is intentional.
template<typename T>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Single-batch 5x5 input with 2 interleaved channels (each pair below is
    // one pixel: {channel0, channel1}).
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // 4x4 kernel with 2 interleaved channels (depth multiplier 1).
    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 16,
            31, 15,
            30, 14,
            29, 13,

            28, 12,
            27, 11,
            26, 10,
            25,  9,

            24,  8,
            23,  7,
            22,  6,
            21,  5,

            20,  4,
            19,  3,
            18,  2,
            17,  1
        })));

    // Expected output: 1 batch, 5x5, 2 interleaved channels.
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
      memoryManager,
      input,
      kernel,
      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
      expectedOutput,
      qScale,
      qOffset,
      1,  // Padding left.
      1,  // Padding top.
      2,  // Padding right.
      2,  // Padding bottom.
      1,  // strideX
      1); // strideY
}
658
telsoa014fcda012018-03-09 14:13:49 +0000659LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000660Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
661 armnn::IWorkloadFactory& workloadFactory,
662 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000663 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000664{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000665 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
666 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000667}
668
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000669LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
670 armnn::IWorkloadFactory& workloadFactory,
671 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000672 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000673{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000674 return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
675 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000676}
677
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000678LayerTestResult<float, 4> DepthwiseConvolution2dTest(
679 armnn::IWorkloadFactory& workloadFactory,
680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
681 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000682 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000683{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000684 return DepthwiseConvolution2dTestImpl<float, float>(
685 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000686}
687
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000688LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
689 armnn::IWorkloadFactory& workloadFactory,
690 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
691 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100692{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000693 return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100694}
695
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000696LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
697 armnn::IWorkloadFactory& workloadFactory,
698 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
699 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000700 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000701{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000702 return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
703 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000704}
705
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000706LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
707 armnn::IWorkloadFactory& workloadFactory,
708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
709 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000710 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100711{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000712 return DepthwiseConvolution2dAsymmetricTestCommon<float>(
713 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100714}
715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000716LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
717 armnn::IWorkloadFactory& workloadFactory,
718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
719 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000720 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000721{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000722 return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
723 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000724}
725
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000726LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
727 armnn::IWorkloadFactory& workloadFactory,
728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
729 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000730 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000731{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000732 return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
733 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000734}
735
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000736LayerTestResult<float, 4> Convolution1dTest(
737 armnn::IWorkloadFactory& workloadFactory,
738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
739 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000740{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000741 return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000742}
743
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000744LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
745 armnn::IWorkloadFactory& workloadFactory,
746 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
747 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000748{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000749 return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000750}
751
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000752LayerTestResult<float,4> CompareConvolution2dTest(
753 armnn::IWorkloadFactory& workloadFactory,
754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
755 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000756{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000757 return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000758}
759
760template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000761LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
762 armnn::IWorkloadFactory& workloadFactory,
763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
764 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000765 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000766{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000767 return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000768}
769
// Explicit instantiation for Float32, so the template definition above can
// remain in this translation unit while being callable from other files.
template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000775
// Explicit instantiation for quantized uint8 data (see the float
// instantiation above for rationale).
template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    armnn::IWorkloadFactory&,
    const armnn::DataLayout);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000781
782LayerTestResult<float,4> SimpleNormalizationAcrossTest(
783 armnn::IWorkloadFactory& workloadFactory,
784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000785{
786 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
787 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000788 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000789}
790
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000791LayerTestResult<float,4> SimpleNormalizationWithinTest(
792 armnn::IWorkloadFactory& workloadFactory,
793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000794{
795 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
796 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000797 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000798}
799
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000800LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
801 armnn::IWorkloadFactory& workloadFactory,
802 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +0100803{
804 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
805 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000806 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +0100807}
808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000809LayerTestResult<float,2> SimpleSoftmaxTest(
810 armnn::IWorkloadFactory& workloadFactory,
811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
812 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000813{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000814 return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000815}
816
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000817LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
818 armnn::IWorkloadFactory& workloadFactory,
819 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
820 float beta)
telsoa014fcda012018-03-09 14:13:49 +0000821{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000822 return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +0000823}
824
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000825LayerTestResult<float,4> CompareNormalizationTest(
826 armnn::IWorkloadFactory& workloadFactory,
827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
828 armnn::IWorkloadFactory& refWorkloadFactory,
829 armnn::NormalizationAlgorithmChannel normChannel,
830 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +0000831{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000832 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +0000833}
834
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000835LayerTestResult<float,2> CompareSoftmaxTest(
836 armnn::IWorkloadFactory& workloadFactory,
837 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000838 armnn::IWorkloadFactory& refWorkloadFactory,
839 float beta)
840{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000841 return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000842}
843
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000844LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
845 armnn::IWorkloadFactory& workloadFactory,
846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000847 armnn::IWorkloadFactory& refWorkloadFactory,
848 float beta)
849{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000850 return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +0000851}
852
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000853std::vector<LayerTestResult<float,3>> SplitterTest(
854 armnn::IWorkloadFactory& workloadFactory,
855 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000856{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857 return SplitterTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +0000858}
859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000860std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
861 armnn::IWorkloadFactory& workloadFactory,
862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000863{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000864 return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000865}
866
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867LayerTestResult<float, 3> CopyViaSplitterTest(
868 armnn::IWorkloadFactory& workloadFactory,
869 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000870{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000871 return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000872}
873
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000874LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
875 armnn::IWorkloadFactory& workloadFactory,
876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +0000877{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000878 return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000879}
880
telsoa01c577f2c2018-08-31 09:22:23 +0100881LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000882 armnn::IWorkloadFactory& workloadFactory,
883 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100884{
885 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
886 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
887 { 2., 3., 3., 4. }));
888
889 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
890 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
891 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
892 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000893 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
894 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100895}
896
897LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000898 armnn::IWorkloadFactory& workloadFactory,
899 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100900{
901 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
902 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
903 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
904 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
905
906 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
907 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
908 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
909 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
910 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
911 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
912 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
913 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
914 0.02168f}));
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000915 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100916}
917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000918LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
919 armnn::IWorkloadFactory& workloadFactory,
920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +0100921{
922 armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
923 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
924 {2., 3., 3., 4.}));
925
926
927 armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
928 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
929 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
930 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000932 return LstmNoCifgNoPeepholeNoProjectionTestImpl(
933 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +0100934}
935
// Concatenates a [2,6,3] and a [1,6,3] Float32 tensor along the channel
// dimension into a [3,6,3] output via the Merger (concat) workload.
// When the backend supports sub-tensors, the two inputs are created as
// views directly into the output tensor (zero-copy concat); otherwise
// they get independent handles and the workload performs the copy.
LayerTestResult<float,3> MergerTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NOTE(review): memoryManager is accepted for signature consistency with
    // the other tests in this file but is not used by this implementation.
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    // First input occupies output channels [0, 2).
    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    // Second input occupies output channel [2, 3).
    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);

    LayerTestResult<float,3> ret(outputTensorInfo);

    // Expected result: input1's two channels followed by input2's one channel.
    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,

            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
        {
            1.0f, 2.0f, 3.0f,
            4.0f, 5.0f, 6.0f,
            7.0f, 8.0f, 9.0f,
            10.0f, 11.0f, 12.0f,
            13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f,

            19.0f, 20.0f, 21.0f,
            22.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 27.0f,
            28.0f, 29.0f, 30.0f,
            31.0f, 32.0f, 33.0f,
            34.0f, 35.0f, 36.0f,
        })
    );

    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
        {
            37.0f, 38.0f, 39.0f,
            40.0f, 41.0f, 42.0f,
            43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f,
            49.0f, 50.0f, 51.0f,
            52.0f, 53.0f, 54.0f,
        })
    );

    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);

    // The output handle must exist before the inputs, since the input handles
    // may be created as sub-tensor views into it.
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::MergerQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // View origins tell the workload where each input lands in the output.
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
1057
// Elementwise addition of two same-shaped [2,2,2,3] Float32 tensors.
// Builds the workload by hand (handles, queue descriptor, workload info),
// executes it, and returns both the actual and the expected output.
LayerTestResult<float,4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // NOTE(review): memoryManager is accepted for signature consistency with
    // the other tests in this file but is not used by this implementation.
    unsigned int batchSize = 2;
    unsigned int channels = 2;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    // Both inputs and the output share the same shape (no broadcasting here).
    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);


    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
        {
            0.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            1.0f, 2.0f, 1.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 1.0f, 2.0f,

            0.0f, 0.0f, 1.0f,
            0.2f, 1.0f, 2.0f,
        }));

    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
        {
            1.0f, 2.0f, 1.0f,
            0.0f, 1.0f, 2.0f,

            1.0f, 2.0f, -2.0f,
            0.2f, 1.0f, 2.0f,

            0.0f, 2.0f, 1.0f,
            4.2f, 0.0f, -3.0f,

            0.0f, 0.0f, 1.0f,
            0.7f, 1.0f, 5.0f,
        }));

    LayerTestResult<float,4> ret(outputTensorInfo);
    // Expected output is the elementwise sum of input1 and input2.
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
        {
            1.0f, 4.0f, 2.0f,
            0.2f, 2.0f, 4.0f,

            2.0f, 4.0f, -1.0f,
            0.4f, 2.0f, 4.0f,

            0.0f, 4.0f, 2.0f,
            8.4f, 1.0f, -1.0f,

            0.0f, 0.0f, 2.0f,
            0.9f, 2.0f, 7.0f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1148
// Addition with two-sided broadcasting: a [1,3,2,1] tensor plus a [1,1,2,3]
// tensor produces a [1,3,2,3] output (input1 broadcasts along the last
// dimension, input2 along the second). qScale/qOffset apply only when T is
// a quantized type.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // NOTE(review): memoryManager is not referenced by this implementation.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantization parameters are only meaningful for quantized element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    // Expected output: each input1 scalar broadcast-added to each input2 row.
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1226
// Addition with single-element broadcasting: a [1,3,2,3] tensor plus a
// [1,1,1,1] scalar tensor produces a [1,3,2,3] output (the scalar is added
// to every element). qScale/qOffset apply only when T is a quantized type.
template <typename T>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    // NOTE(review): memoryManager is not referenced by this implementation.
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());

    // Quantization parameters are only meaningful for quantized element types.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
        {
            0.0f, 1.0f, 2.0f,
            3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        }));

    // The single broadcast element added to every value of input1.
    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f,
        }));

    LayerTestResult<T,4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
            6.5f, 7.5f, 8.5f,
            9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    // Handles must be allocated before any data is copied into them.
    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
1299
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001300LayerTestResult<float, 4> AdditionBroadcastTest(
1301 armnn::IWorkloadFactory& workloadFactory,
1302 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001303{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001304 return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001305}
1306
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001307LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1308 armnn::IWorkloadFactory& workloadFactory,
1309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001310{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001311 return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001312}
1313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001314LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1315 armnn::IWorkloadFactory& workloadFactory,
1316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001317{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001318 return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001319}
1320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001321LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1322 armnn::IWorkloadFactory& workloadFactory,
1323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001324{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001325 return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001326}
1327
// Runs the same Float32 addition workload (random [4,1,2,3] inputs) on both
// the backend under test and a reference workload factory, returning the
// two outputs as actual vs. expected so the caller can compare them.
LayerTestResult<float,4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // NOTE(review): memoryManager is accepted for signature consistency with
    // the other tests in this file but is not used by this implementation.
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // Fixed seeds keep the "random" inputs deterministic across runs.
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // The reference workload reuses the same descriptor, but with its
    // handles swapped to the reference factory's tensor handles.
    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // Actual result from the backend under test; expected from the reference.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
1395
surmeh01bceff2f2018-03-29 16:29:27 +01001396namespace {
David Beck5cd01f32018-09-12 16:00:08 +01001397template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001398LayerTestResult<T, 4> DivisionTestHelper(
1399 armnn::IWorkloadFactory& workloadFactory,
1400 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1401 const unsigned int shape0[4],
1402 const std::vector<T>& values0,
1403 float scale0,
1404 int32_t offset0,
1405 const unsigned int shape1[4],
1406 const std::vector<T> & values1,
1407 float scale1,
1408 int32_t offset1,
1409 const unsigned int outShape[4],
1410 const std::vector<T> & outValues,
1411 float outScale,
1412 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001413{
1414 auto dataType = (std::is_same<T, uint8_t>::value ?
1415 armnn::DataType::QuantisedAsymm8 :
1416 armnn::DataType::Float32);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001417
David Beck5cd01f32018-09-12 16:00:08 +01001418 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
1419 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
1420 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001421
David Beck5cd01f32018-09-12 16:00:08 +01001422 inputTensorInfo0.SetQuantizationScale(scale0);
1423 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001424
David Beck5cd01f32018-09-12 16:00:08 +01001425 inputTensorInfo1.SetQuantizationScale(scale1);
1426 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001427
David Beck5cd01f32018-09-12 16:00:08 +01001428 outputTensorInfo.SetQuantizationScale(outScale);
1429 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001430
David Beck5cd01f32018-09-12 16:00:08 +01001431 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1432 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001433
David Beck5cd01f32018-09-12 16:00:08 +01001434 LayerTestResult<T, 4> result(outputTensorInfo);
1435 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001436
David Beck5cd01f32018-09-12 16:00:08 +01001437 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1438 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1439 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001440
David Beck5cd01f32018-09-12 16:00:08 +01001441 armnn::DivisionQueueDescriptor data;
1442 armnn::WorkloadInfo info;
1443 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1444 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1445 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001446
David Beck5cd01f32018-09-12 16:00:08 +01001447 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001448
David Beck5cd01f32018-09-12 16:00:08 +01001449 inputHandle0->Allocate();
1450 inputHandle1->Allocate();
1451 outputHandle->Allocate();
1452
1453 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1454 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1455
David Beck5cd01f32018-09-12 16:00:08 +01001456 workload->Execute();
1457
1458 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1459
1460 return result;
1461}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001462} // anonymous namespace
1463
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001464LayerTestResult<float,4> DivisionByZeroTest(
1465 armnn::IWorkloadFactory& workloadFactory,
1466 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001467{
1468 const unsigned int width = 2;
1469 const unsigned int height = 2;
1470 const unsigned int channelCount = 2;
1471 const unsigned int batchSize = 2;
1472
1473 unsigned int shape[] = { batchSize, channelCount, height, width };
1474
1475 std::vector<float> input0({
1476 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1477 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1478
1479 std::vector<float> input1({
1480 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1481 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1482
1483 std::vector<float> output({
1484 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1485 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1486
David Beck5cd01f32018-09-12 16:00:08 +01001487 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001488 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001489 shape, input0, 1.0f, 0,
1490 shape, input1, 1.0f, 0,
1491 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001492}
1493
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001494LayerTestResult<float,4> DivisionTest(
1495 armnn::IWorkloadFactory& workloadFactory,
1496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001497{
1498 const unsigned int width = 2;
1499 const unsigned int height = 2;
1500 const unsigned int channelCount = 2;
1501 const unsigned int batchSize = 2;
1502
1503 unsigned int shape[] = { batchSize, channelCount, height, width };
1504
1505 std::vector<float> input0({
1506 2, 2, 2, 2, 3, 3, 3, 3,
1507 4, 4, 4, 4, 5, 5, 5, 5 });
1508
1509 std::vector<float> input1({
1510 1, 1, 1, 1, 2, 2, 2, 2,
1511 4, 4, 4, 4, 4, 4, 4, 4 });
1512
1513 std::vector<float> output({
1514 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1515 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1516
David Beck5cd01f32018-09-12 16:00:08 +01001517
1518 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001519 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001520 shape, input0, 1.0f, 0,
1521 shape, input1, 1.0f, 0,
1522 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001523}
1524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001525LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1526 armnn::IWorkloadFactory& workloadFactory,
1527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001528{
1529 unsigned int shape0[] = { 1, 2, 2, 2 };
1530 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1531
1532 unsigned int shape1[] = { 1, 1, 1, 1 };
1533 std::vector<float> input1({ 2 });
1534
1535 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1536
David Beck5cd01f32018-09-12 16:00:08 +01001537
1538 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001539 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001540 shape0, input0, 1.0f, 0,
1541 shape1, input1, 1.0f, 0,
1542 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001543}
1544
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001545LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1546 armnn::IWorkloadFactory& workloadFactory,
1547 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001548{
1549 unsigned int shape0[] = { 1, 3, 3, 2 };
1550 std::vector<float> input0({
1551 1, 4, 3, 8, 5, 12,
1552 7, 16, 9, 20, 11, 24,
1553 13, 28, 15, 32, 17, 36});
1554
1555 unsigned int shape1[] = { 1, 1, 1, 2 };
1556 std::vector<float> input1({ 1, 2 });
1557
1558 std::vector<float> output({
1559 1, 2, 3, 4, 5, 6,
1560 7, 8, 9, 10, 11, 12,
1561 13, 14, 15, 16, 17, 18});
1562
David Beck5cd01f32018-09-12 16:00:08 +01001563 return DivisionTestHelper<float>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001564 memoryManager,
David Beck5cd01f32018-09-12 16:00:08 +01001565 shape0, input0, 1.0f, 0,
1566 shape1, input1, 1.0f, 0,
1567 shape0, output, 1.0f, 0);
1568}
1569
1570
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001571LayerTestResult<uint8_t,4> DivisionUint8Test(
1572 armnn::IWorkloadFactory& workloadFactory,
1573 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001574{
1575 const unsigned int width = 2;
1576 const unsigned int height = 2;
1577 const unsigned int channelCount = 2;
1578 const unsigned int batchSize = 2;
1579
1580 unsigned int shape[] = { batchSize, channelCount, height, width };
1581
1582 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1583 4, 4, 4, 4, 5, 5, 5, 5 });
1584
1585 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1586 4, 4, 4, 4, 4, 4, 4, 4 });
1587
1588 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1589 4, 4, 4, 4, 5, 5, 5, 5});
1590
1591
1592 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001593 memoryManager,
1594 shape, input0, 1.0f, 0,
1595 shape, input1, 1.0f, 0,
1596 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001597}
1598
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001599LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1600 armnn::IWorkloadFactory& workloadFactory,
1601 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001602{
1603 unsigned int shape0[] = { 1, 2, 2, 2 };
1604 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1605
1606 unsigned int shape1[] = { 1, 1, 1, 1 };
1607 std::vector<uint8_t> input1({ 2 });
1608
1609 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1610
1611 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001612 memoryManager,
1613 shape0, input0, 1.0f, 0,
1614 shape1, input1, 1.0f, 0,
1615 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001616}
1617
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001618LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1619 armnn::IWorkloadFactory& workloadFactory,
1620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001621{
1622 unsigned int shape0[] = { 1, 3, 3, 2 };
1623 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1624 7, 16, 9, 20, 11, 24,
1625 13, 28, 15, 32, 17, 36});
1626
1627 unsigned int shape1[] = { 1, 1, 1, 2 };
1628 std::vector<uint8_t> input1({ 1, 2 });
1629
1630 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
1631 7, 8, 9, 10, 11, 12,
1632 13, 14, 15, 16, 17, 18});
1633
1634 return DivisionTestHelper<uint8_t>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001635 memoryManager,
1636 shape0, input0, 1.0f, 0,
1637 shape1, input1, 1.0f, 0,
1638 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001639}
1640
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001641template<typename DescriptorType>
1642std::unique_ptr<armnn::IWorkload> CreateWorkload(
1643 const armnn::IWorkloadFactory& workloadFactory,
1644 const armnn::WorkloadInfo& info,
1645 const DescriptorType& descriptor)
1646{
1647 return CreateWorkload(workloadFactory, info, descriptor);
1648};
1649
1650template<>
1651std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
1652 const armnn::IWorkloadFactory& workloadFactory,
1653 const armnn::WorkloadInfo& info,
1654 const armnn::MaximumQueueDescriptor& descriptor)
1655{
1656 return workloadFactory.CreateMaximum(descriptor, info);
1657}
1658
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001659template<>
1660std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
1661 const armnn::IWorkloadFactory& workloadFactory,
1662 const armnn::WorkloadInfo& info,
1663 const armnn::MinimumQueueDescriptor& descriptor)
1664{
1665 return workloadFactory.CreateMinimum(descriptor, info);
1666}
1667
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001668namespace {
1669 template <typename Descriptor, typename dataType>
1670 LayerTestResult<dataType, 4> ElementwiseTestHelper
1671 (armnn::IWorkloadFactory & workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
1673 const unsigned int shape0[4], std::vector<dataType> values0,
1674 const unsigned int shape1[4], std::vector<dataType> values1,
1675 const unsigned int outShape[4], std::vector<dataType> outValues,
1676 float qScale = 0.0f, int qOffset = 0)
1677 {
1678 const size_t dimensionCount = 4;
1679 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
1680 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
1681 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};
1682
1683 auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
1684 auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);
1685
1686 if (armnn::IsQuantizedType<dataType>())
1687 {
1688 inputTensorInfo0.SetQuantizationScale(qScale);
1689 inputTensorInfo0.SetQuantizationOffset(qOffset);
1690
1691 inputTensorInfo1.SetQuantizationScale(qScale);
1692 inputTensorInfo1.SetQuantizationOffset(qOffset);
1693
1694 outputTensorInfo.SetQuantizationScale(qScale);
1695 outputTensorInfo.SetQuantizationOffset(qOffset);
1696 }
1697
1698 LayerTestResult<dataType,4> ret(outputTensorInfo);
1699
1700 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1701 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1702 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1703
1704 Descriptor data;
1705 armnn::WorkloadInfo info;
1706 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1707 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1708 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1709 auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
1710
1711 inputHandle0->Allocate();
1712 inputHandle1->Allocate();
1713 outputHandle->Allocate();
1714
1715 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1716 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1717
1718 ExecuteWorkload(*workload, memoryManager);
1719
1720 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1721
1722 ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
1723 return ret;
1724 }
1725}
1726
1727
1728LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
1729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1730{
1731 const unsigned int width = 2;
1732 const unsigned int height = 2;
1733 const unsigned int channelCount = 2;
1734 const unsigned int batchSize = 2;
1735
1736 unsigned int shape[] = { batchSize, channelCount, height, width };
1737
1738 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
1739 3, 3, 3, 3, 4, 4, 4, 4 });
1740
1741 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
1742 4, 4, 4, 4, 5, 5, 5, 5 });
1743
1744 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
1745 4, 4, 4, 4, 5, 5, 5, 5 });
1746
1747 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1748 (workloadFactory,
1749 memoryManager,
1750 shape,
1751 input0,
1752 shape,
1753 input1,
1754 shape,
1755 output);
1756}
1757
1758LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
1759 armnn::IWorkloadFactory& workloadFactory,
1760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1761{
1762 unsigned int shape0[] = { 1, 2, 2, 2 };
1763 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1764
1765 unsigned int shape1[] = { 1, 1, 1, 1 };
1766 std::vector<float> input1({ 2 });
1767
1768 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
1769
1770 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1771 (workloadFactory,
1772 memoryManager,
1773 shape0,
1774 input0,
1775 shape1,
1776 input1,
1777 shape0,
1778 output);
1779}
1780
1781LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
1782 armnn::IWorkloadFactory& workloadFactory,
1783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1784{
1785 const unsigned int shape0[] = { 1, 2, 2, 3 };
1786 const unsigned int shape1[] = { 1, 1, 1, 3 };
1787
1788 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
1789 7, 8, 9, 10, 11, 12 });
1790
1791 std::vector<float> input1({ 1, 2, 3});
1792
1793 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
1794 7, 8, 9, 10, 11, 12 });
1795
1796 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
1797 (workloadFactory,
1798 memoryManager,
1799 shape0,
1800 input0,
1801 shape1,
1802 input1,
1803 shape0,
1804 output);
1805}
1806
1807LayerTestResult<uint8_t, 4> MaximumUint8Test(
1808 armnn::IWorkloadFactory& workloadFactory,
1809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1810{
1811 unsigned int shape[] = { 2, 2, 2, 2 };
1812
1813 // See dequantized values to the right.
1814 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
1815 3, 3, 3, 3, 4, 4, 4, 4 });
1816
1817 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
1818 4, 4, 4, 4, 5, 5, 5, 5 });
1819
1820 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
1821 4, 4, 4, 4, 5, 5, 5, 5 });
1822
1823 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
1824 (workloadFactory,
1825 memoryManager,
1826 shape,
1827 input0,
1828 shape,
1829 input1,
1830 shape,
1831 output,
1832 1.0f,
1833 0);
1834}
1835
1836LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
1837 armnn::IWorkloadFactory& workloadFactory,
1838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1839{
1840 const unsigned int shape0[] = { 1, 2, 2, 3 };
1841 const unsigned int shape1[] = { 1, 1, 1, 1 };
1842
1843 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1844 7, 8, 9, 10, 11, 12 });
1845
1846 std::vector<uint8_t> input1({2});
1847
1848 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
1849 7, 8, 9, 10, 11, 12 });
1850
1851 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
1852 (workloadFactory,
1853 memoryManager,
1854 shape0,
1855 input0,
1856 shape1,
1857 input1,
1858 shape0,
1859 output,
1860 1.0f,
1861 0);
1862}
1863
1864LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
1865 armnn::IWorkloadFactory& workloadFactory,
1866 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1867{
1868 const unsigned int shape0[] = { 1, 2, 2, 3 };
1869 const unsigned int shape1[] = { 1, 1, 1, 3 };
1870
1871 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
1872 7, 8, 9, 10, 11, 12 });
1873
1874 std::vector<uint8_t> input1({ 1, 10, 3});
1875
1876 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
1877 7, 10, 9, 10, 11, 12 });
1878
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001879 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
Éanna Ó Catháinde705582018-12-03 13:04:22 +00001880 (workloadFactory,
1881 memoryManager,
1882 shape0,
1883 input0,
1884 shape1,
1885 input1,
1886 shape0,
1887 output,
1888 1.0f,
1889 0);
1890}
1891
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00001892LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
1893 armnn::IWorkloadFactory& workloadFactory,
1894 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1895{
1896 unsigned int shape0[] = { 1, 2, 2, 2 };
1897 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
1898
1899 unsigned int shape1[] = { 1, 1, 1, 1 };
1900 std::vector<float> input1({ 2 });
1901
1902 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
1903
1904 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
1905 memoryManager,
1906 shape0,
1907 input0,
1908 shape1,
1909 input1,
1910 shape0,
1911 output);
1912}
1913
1914
1915LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
1916 armnn::IWorkloadFactory& workloadFactory,
1917 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1918{
1919 unsigned int shape0[] = { 1, 2, 2, 2 };
1920 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
1921
1922 unsigned int shape1[] = { 1, 1, 1, 1 };
1923 std::vector<float> input1({ 5 });
1924
1925 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
1926
1927 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
1928 memoryManager,
1929 shape0,
1930 input0,
1931 shape1,
1932 input1,
1933 shape0,
1934 output);
1935}
1936
1937LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
1938 armnn::IWorkloadFactory & workloadFactory,
1939 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
1940{
1941 const unsigned int shape0[] = { 1, 2, 2, 3 };
1942 const unsigned int shape1[] = { 1, 1, 1, 3 };
1943
1944 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
1945 7, 1, 2, 3, 4, 5 });
1946
1947 std::vector<uint8_t> input1({ 1, 2, 3});
1948
1949 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
1950 1, 1, 2, 1, 2, 3 });
1951
1952 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
1953 memoryManager,
1954 shape0,
1955 input0,
1956 shape1,
1957 input1,
1958 shape0,
1959 output,
1960 1.0f,
1961 0);
1962}
1963
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001964namespace {
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001965LayerTestResult<float,4> MultiplicationTestHelper(
1966 armnn::IWorkloadFactory& workloadFactory,
1967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1968 const unsigned int shape0[4],
1969 const std::vector<float> & values0,
1970 const unsigned int shape1[4],
1971 const std::vector<float> & values1,
1972 const unsigned int outShape[4],
1973 const std::vector<float> & outValues)
telsoa014fcda012018-03-09 14:13:49 +00001974{
surmeh01bceff2f2018-03-29 16:29:27 +01001975 const size_t dimensionCount = 4;
1976 armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
1977 armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
1978 armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
telsoa014fcda012018-03-09 14:13:49 +00001979
surmeh01bceff2f2018-03-29 16:29:27 +01001980 auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
1981 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00001982
1983 LayerTestResult<float,4> ret(outputTensorInfo);
1984
1985 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1986 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1987 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1988
1989 armnn::MultiplicationQueueDescriptor data;
1990 armnn::WorkloadInfo info;
1991 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1992 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1993 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1994
1995 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
1996
1997 inputHandle0->Allocate();
1998 inputHandle1->Allocate();
1999 outputHandle->Allocate();
2000
2001 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
2002 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
2003
2004 workload->Execute();
2005
2006 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
2007
surmeh01bceff2f2018-03-29 16:29:27 +01002008 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00002009 return ret;
2010}
surmeh01bceff2f2018-03-29 16:29:27 +01002011} // anonymous namespace
2012
2013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002014LayerTestResult<float,4> MultiplicationTest(
2015 armnn::IWorkloadFactory& workloadFactory,
2016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002017{
2018 const unsigned int width = 2;
2019 const unsigned int height = 2;
2020 const unsigned int channelCount = 2;
2021 const unsigned int batchSize = 2;
2022
2023 unsigned int shape[] = { batchSize, channelCount, height, width };
2024
2025 std::vector<float> input0({
2026 1, 1, 1, 1, 2, 2, 2, 2,
2027 3, 3, 3, 3, 4, 4, 4, 4 });
2028
2029 std::vector<float> input1({
2030 2, 2, 2, 2, 3, 3, 3, 3,
2031 4, 4, 4, 4, 5, 5, 5, 5 });
2032
2033 std::vector<float> output({
2034 2, 2, 2, 2, 6, 6, 6, 6,
2035 12, 12, 12, 12, 20, 20, 20, 20 });
2036
2037 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002038 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002039 shape,
2040 input0,
2041 shape,
2042 input1,
2043 shape,
2044 output);
2045}
2046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002047LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
2048 armnn::IWorkloadFactory& workloadFactory,
2049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002050{
2051 unsigned int shape0[] = { 1, 2, 2, 2 };
2052 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2053
2054 unsigned int shape1[] = { 1, 1, 1, 1 };
2055 std::vector<float> input1({ 2 });
2056
2057 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
2058
2059 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002060 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002061 shape0,
2062 input0,
2063 shape1,
2064 input1,
2065 shape0,
2066 output);
2067}
2068
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002069LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
2070 armnn::IWorkloadFactory& workloadFactory,
2071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01002072{
2073 unsigned int shape0[] = { 1, 3, 3, 2 };
2074 std::vector<float> input0({
2075 1, 2, 3, 4, 5, 6,
2076 7, 8, 9, 10, 11, 12,
2077 13, 14, 15, 16, 17, 18});
2078
2079 unsigned int shape1[] = { 1, 1, 1, 2 };
2080 std::vector<float> input1({ 1, 2 });
2081
2082 std::vector<float> output({
2083 1, 4, 3, 8, 5, 12,
2084 7, 16, 9, 20, 11, 24,
2085 13, 28, 15, 32, 17, 36});
2086
2087 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002088 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01002089 shape0,
2090 input0,
2091 shape1,
2092 input1,
2093 shape0,
2094 output);
2095}
telsoa014fcda012018-03-09 14:13:49 +00002096
// Runs the same Multiplication workload on workloadFactory and on
// refWorkloadFactory with identical random inputs, and returns both results
// so the caller can compare backend output against the reference output.
// Note: memoryManager is not used here; the workloads are executed directly.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    // output: backend under test; outputExpected: reference backend.
    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the "random" inputs reproducible across runs.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference run, then repoint its
    // input/output handles at the reference factory's tensor handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    // Allocate all handles before copying data in.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
2165
// Runs the same batch normalization workload on two workload factories (the
// backend under test and a reference backend), feeding both identical random
// input and parameter data, and returns both outputs so the caller can
// compare them.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    // Fixed NCHW problem size for the comparison.
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    // The per-channel parameter tensors (mean/variance/beta/gamma) are 1D.
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    // Variance is generated with a 0.0f lower bound so it is never negative.
    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // The reference workload reuses the same descriptor and info, but is
    // rebound to the reference factory's tensor handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads consume identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // output holds the backend-under-test result; outputExpected the
    // reference backend's result.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
2246
surmeh013537c2c2018-05-18 16:31:43 +01002247template<typename T>
2248void PermuteTensorData(
2249 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002251 const armnn::PermutationVector& mappings,
2252 armnn::TensorInfo & inputTensorInfo,
2253 const T * inputData,
2254 std::vector<T>& outputData)
telsoa014fcda012018-03-09 14:13:49 +00002255{
surmeh013537c2c2018-05-18 16:31:43 +01002256 BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
2257 if (inputData == nullptr)
2258 {
2259 // Nullptr is an error in the test. By returning without doing the concatenation
2260 // I expect the caller to fail the test. It still makes sense to report this as
2261 // an assert for Debug builds.
2262 return;
2263 }
telsoa014fcda012018-03-09 14:13:49 +00002264
surmeh013537c2c2018-05-18 16:31:43 +01002265 armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
2266
2267 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
2268 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2269
2270 armnn::PermuteQueueDescriptor queueDescriptor;
2271 queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
2272 armnn::WorkloadInfo workloadInfo;
2273 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
2274 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2275
2276 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
2277
2278 inputHandle->Allocate();
2279 outputHandle->Allocate();
2280
2281 CopyDataToITensorHandle(inputHandle.get(), inputData);
2282
2283 workload->Execute();
2284
2285 outputData.resize(outputTensorInfo.GetNumElements());
2286 CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
2287 inputTensorInfo = outputTensorInfo;
2288}
2289
2290armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
2291 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2292 unsigned int concatDim)
2293{
telsoa014fcda012018-03-09 14:13:49 +00002294 std::vector<armnn::TensorShape> shapes;
2295 shapes.reserve(inputTensorInfos.size());
2296 for (const armnn::TensorInfo& it: inputTensorInfos)
2297 {
2298 shapes.push_back(it.GetShape());
2299 }
surmeh013537c2c2018-05-18 16:31:43 +01002300
2301 return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
2302 shapes.end(),
2303 concatDim);
2304}
2305
//
// Concatenation is only supported for the N and C dimensions for NCHW, and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concatenation dimension is at
// least the third slowest iterating one, or the innermost dimension.
//
2311
2312bool NeedPermuteForConcat(
2313 const std::vector<armnn::TensorInfo> & inputTensorInfos,
2314 unsigned int concatDim)
2315{
2316 // See note above. Additionally we expect the input shapes to have the
2317 // same number of dimensions.
2318 unsigned int nDimensions = 0;
2319
telsoa01c577f2c2018-08-31 09:22:23 +01002320 // Determine the number of dimensions as well as sanity check them
2321 // agains test implementation issues.
surmeh013537c2c2018-05-18 16:31:43 +01002322 for (auto && tensorInfo : inputTensorInfos)
2323 {
2324 if (!nDimensions)
2325 {
2326 nDimensions = tensorInfo.GetShape().GetNumDimensions();
2327 }
2328 else
2329 {
2330 BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
2331 "Input shapes must have the same number of dimensions");
2332 }
2333 }
2334
narpra015cdda352018-11-19 15:30:27 +00002335 return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
surmeh013537c2c2018-05-18 16:31:43 +01002336}
2337
2338armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
2339{
2340 unsigned int numDims = inputShape.GetNumDimensions();
2341 if (numDims >= 3)
2342 {
2343 // Nothing to do if the inputShape has at least 3 dimensions.
2344 return inputShape;
2345 }
2346
2347 std::vector<unsigned int> newDims(size_t(3), 1u);
2348 unsigned int expandedBy = 3 - numDims;
2349 for (unsigned int i=0; i<numDims; ++i)
2350 {
2351 newDims[expandedBy+i] = inputShape[i];
2352 }
2353 return armnn::TensorShape(3u, &newDims[0]);
2354}
2355
2356void Generate3dPermuteVectorForConcat(
2357 unsigned int numDimensions,
2358 unsigned int & concatDim,
2359 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
2360{
2361 BOOST_ASSERT_MSG(numDimensions <= 3,
2362 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01002363 unsigned int expandedBy = 3 - numDimensions;
2364 unsigned int expandedConcatAxis = concatDim + expandedBy;
2365
2366 if (expandedConcatAxis == 2)
2367 {
2368 concatDim = 0;
2369 armnn::PermutationVector forwardPermutation({1, 2, 0});
2370 armnn::PermutationVector reversePermutation({2, 0, 1});
2371 permutations = std::make_pair(forwardPermutation, reversePermutation);
2372 }
2373 else if (expandedConcatAxis == 1)
2374 {
2375 concatDim = 0;
2376 armnn::PermutationVector forwardPermutation({2, 0, 1});
2377 armnn::PermutationVector reversePermutation({1, 2, 0});
2378 permutations = std::make_pair(forwardPermutation, reversePermutation);
2379 }
2380 else
2381 {
2382 BOOST_ASSERT(expandedConcatAxis == 0);
2383 concatDim = 0;
2384 }
2385}
2386
//
// Permute the input tensors so we can do a supported concatenation.
// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
// at the front. Finally this function tells what the output shape
// of the permuted concatenated tensor is going to be.
//
// On return:
//  - inputTensorInfos/inputData describe the permuted (rank-3) inputs, with
//    the permuted element data owned by inputDataStorage.
//  - permuteVector holds the reverse permutation needed to undo the forward
//    permutation on the concatenated output.
//  - concatDim is the axis to concatenate along after permuting.
//  - outputTensorInfo's shape is the permuted rank-3 output shape.
//
template <typename T>
void PermuteInputsForConcat(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<armnn::TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    armnn::PermutationVector & permuteVector,
    unsigned int & concatDim,
    armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // The first input decides the rank and hence the permutation;
            // every subsequent input must match it.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        // Expand to rank 3 before permuting, so a single 3d permutation
        // also covers the 1d and 2d cases.
        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Point the caller's data pointer at the permuted copy.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
2455
2456
2457//
2458// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01002459// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01002460// output.
2461//
2462template <typename T>
2463void PermuteOutputForConcat(
2464 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002466 const armnn::TensorInfo & tensorInfo,
2467 const armnn::PermutationVector & permuteVector,
2468 std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
2469 T * data)
2470{
2471 BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
2472 if (data == nullptr)
2473 {
2474 // Nullptr is an error in the test. By returning without doing the permutation
2475 // I expect the caller to fail the test. It still makes sense to report this as
2476 // an assert for Debug builds.
2477 return;
2478 }
2479
2480 armnn::TensorInfo resultTensorInfo = tensorInfo;
2481 std::vector<T> inputData(tensorInfo.GetNumElements());
2482 std::vector<T> outputData;
2483
2484 CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
2485
2486 PermuteTensorData<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002487 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002488 permuteVector,
2489 resultTensorInfo,
2490 &inputData[0],
2491 outputData);
2492
2493 ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
2494}
2495
2496template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002497void Concatenate(
2498 armnn::IWorkloadFactory& workloadFactory,
2499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2500 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
2501 std::initializer_list<T *> inputsOrig,
2502 const armnn::TensorInfo& outputTensorInfoOrig,
2503 T * output,
narpra015cdda352018-11-19 15:30:27 +00002504 unsigned int concatDim,
2505 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01002506{
2507 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
2508 if (output == nullptr)
2509 {
2510 // Nullptr is an error in the test. By returning without doing the permutation
2511 // I expect the caller to fail the test. It still makes sense to report this as
2512 // an assert for Debug builds.
2513 return;
2514 }
2515
telsoa01c577f2c2018-08-31 09:22:23 +01002516 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01002517 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
2518 std::vector<T *> inputs = inputsOrig;
2519 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
2520
2521 armnn::PermutationVector permuteVector{0, 1, 2};
2522
telsoa01c577f2c2018-08-31 09:22:23 +01002523 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01002524 std::vector<std::vector<T>> tmpInputDataStorage;
2525
2526 const size_t inputCount = inputTensorInfos.size();
2527
2528 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
2529
2530 if (needPermuteForConcat)
2531 {
2532 //
2533 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01002534 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01002535 //
2536 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002537 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002538 inputTensorInfos,
2539 inputs,
2540 tmpInputDataStorage,
2541 permuteVector,
2542 concatDim,
2543 outputTensorInfo);
2544 }
2545
narpra015cdda352018-11-19 15:30:27 +00002546 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00002547
2548 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
2549 inputHandles.reserve(inputCount);
2550
narpra015cdda352018-11-19 15:30:27 +00002551 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
2552
2553 armnn::MergerQueueDescriptor queueDescriptor;
2554 armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
2555 queueDescriptor.m_Parameters = viewsDescriptor;
2556
2557 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00002558 {
narpra015cdda352018-11-19 15:30:27 +00002559 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
2560 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
2561 {
2562 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
2563 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
2564 }
telsoa014fcda012018-03-09 14:13:49 +00002565
narpra015cdda352018-11-19 15:30:27 +00002566 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00002567
narpra015cdda352018-11-19 15:30:27 +00002568 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2569 for (unsigned int i = 0; i < inputCount; ++i)
2570 {
2571 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
2572 std::unique_ptr<armnn::ITensorHandle> inputHandle =
2573 subTensorsSupported ?
2574 workloadFactory.CreateSubTensorHandle(*outputHandle,
2575 inputTensorInfo.GetShape(),
2576 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
2577 workloadFactory.CreateTensorHandle(inputTensorInfo);
2578
2579 inputHandles.emplace_back(std::move(inputHandle));
2580 }
2581
telsoa014fcda012018-03-09 14:13:49 +00002582 }
narpra015cdda352018-11-19 15:30:27 +00002583 else
2584 {
2585 for (unsigned int i = 0; i < inputCount; ++i)
2586 {
2587 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
2588 inputHandles.emplace_back(std::move(inputHandle));
2589 }
2590 }
telsoa014fcda012018-03-09 14:13:49 +00002591
2592 for (unsigned int i = 0; i < inputCount; ++i)
2593 {
surmeh013537c2c2018-05-18 16:31:43 +01002594 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00002595 }
2596
2597 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
2598
2599 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
2600
2601 for (auto& inputHandle : inputHandles)
2602 {
2603 inputHandle->Allocate();
2604 }
2605
2606 outputHandle->Allocate();
2607
2608 unsigned int nextInputId = 0;
2609 for (auto& inputHandle : inputHandles)
2610 {
surmeh013537c2c2018-05-18 16:31:43 +01002611 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
2612 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00002613 }
2614
2615 workload->Execute();
2616
surmeh013537c2c2018-05-18 16:31:43 +01002617 if (needPermuteForConcat)
2618 {
2619 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002620 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01002621 outputTensorInfo,
2622 permuteVector,
2623 std::move(outputHandle),
2624 output);
2625 }
2626 else
2627 {
2628 CopyDataFromITensorHandle(output, outputHandle.get());
2629 }
telsoa014fcda012018-03-09 14:13:49 +00002630}
2631
2632template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002633LayerTestResult<T, 1> Concatenation1dTestImpl(
2634 armnn::IWorkloadFactory& workloadFactory,
2635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2636 float qScale,
2637 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002638{
2639 armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
2640
2641 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
2642 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
2643 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
2644
2645 armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
2646
2647 LayerTestResult<T, 1> result(outputTensorInfo);
2648
2649 std::vector<T> output;
2650 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002651 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00002652 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
2653 { input0.data(), input1.data(), input2.data() },
2654 outputTensorInfo,
2655 output.data(),
2656 0,
2657 true);
telsoa014fcda012018-03-09 14:13:49 +00002658
2659 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
2660 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2661 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
2662 }));
2663
2664 return result;
2665}
2666
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002667LayerTestResult<float, 1> Concatenation1dTest(
2668 armnn::IWorkloadFactory& workloadFactory,
2669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002670{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002671 return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002672}
2673
// Shared helper for the 2d concatenation tests: concatenates three fixed
// 2x3 tensors along `dimension` into a tensor described by outputTensorInfo
// and returns the result with only `output` filled in — the caller is
// responsible for filling in `outputExpected`.
template <typename T>
LayerTestResult<T, 2> Concatenation2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,
    }));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        7.0f, 8.0f, 9.0f,

        // Batch 1
        16.0f, 17.0f, 18.0f,
    }));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    // The last argument requests sub-tensor input handles where supported.
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}
2724
// Concatenates three 2x3 tensors along dimension 0 (the batch dimension),
// producing a 6x3 result: the inputs are stacked one after another.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());

    LayerTestResult<T, 2> result =
        Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
    // Batches 0-1 come from input0, 2-3 from input1 and 4-5 from input2.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2758
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002759LayerTestResult<float, 2> Concatenation2dDim0Test(
2760 armnn::IWorkloadFactory& workloadFactory,
2761 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002762{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002763 return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002764}
2765
2766template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002767LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
2768 armnn::IWorkloadFactory& workloadFactory,
2769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2770 float qScale,
2771 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00002772{
2773 armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
2774
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002775 LayerTestResult<T, 2> result =
2776 Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00002777 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
2778 // Batch 0
2779 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
2780
2781 // Batch 1
2782 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
2783 }));
2784
2785 return result;
2786}
2787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002788LayerTestResult<float, 2> Concatenation2dDim1Test(
2789 armnn::IWorkloadFactory& workloadFactory,
2790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002791{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002792 return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002793}
2794
// Concatenates along dimension 0 three inputs whose batch counts differ
// (2, 3 and 1 batches of 3 elements each), yielding a 6x3 output.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f,

        // Batch 2
        7.0f, 8.0f, 9.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        16.0f, 17.0f, 18.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // The output batches are the input batches laid out back to back.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,

        // Batch 2
        4.0f, 5.0f, 6.0f,

        // Batch 3
        13.0f, 14.0f, 15.0f,

        // Batch 4
        7.0f, 8.0f, 9.0f,

        // Batch 5
        16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2865
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002866LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
2867 armnn::IWorkloadFactory& workloadFactory,
2868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002869{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002870 return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002871}
2872
// Concatenates along dimension 1 three 2-batch inputs whose widths differ
// (3, 5 and 1 columns), yielding a 2x9 output.
template <typename T>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f,
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

        // Batch 1
        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        9.0f,

        // Batch 1
        18.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    // Each output batch is the matching rows of the inputs joined side by side.
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

        // Batch 1
        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
    }));

    return result;
}
2931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002932LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
2933 armnn::IWorkloadFactory& workloadFactory,
2934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00002935{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00002936 return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00002937}
2938
// Shared helper for the 3d concatenation tests: concatenates three fixed
// 2x3x2 tensors along `dimension` into a tensor described by
// outputTensorInfo and returns the result with only `output` filled in —
// the caller fills in `outputExpected`. useSubtensor controls whether the
// inputs are created as sub-tensors of the output where the backend
// supports it.
template <typename T>
LayerTestResult<T, 3> Concatenation3dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 1, Channel 0
        25.0f, 26.0f,

        // Batch 1, Channel 1
        27.0f, 28.0f,

        // Batch 1, Channel 2
        29.0f, 30.0f
    }));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f
    }));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}
3026
// Concatenates three { 2, 3, 2 } tensors along dimension 0 (batch), giving a
// { 6, 3, 2 } output, and attaches the expected result: the inputs stacked in
// order along the batch axis.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3096
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003097LayerTestResult<float, 3> Concatenation3dDim0Test(
3098 armnn::IWorkloadFactory& workloadFactory,
3099 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003100{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003101 return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003102}
3103
// Concatenates three { 2, 3, 2 } tensors along dimension 1 (channels), giving a
// { 2, 9, 2 } output, and attaches the expected result: per batch, the inputs'
// channels appended in order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        13.0f, 14.0f,

        // Batch 0, Channel 7
        15.0f, 16.0f,

        // Batch 0, Channel 8
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 4
        27.0f, 28.0f,

        // Batch 1, Channel 5
        29.0f, 30.0f,

        // Batch 1, Channel 6
        31.0f, 32.0f,

        // Batch 1, Channel 7
        33.0f, 34.0f,

        // Batch 1, Channel 8
        35.0f, 36.0f
    }));

    return result;
}
3174
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003175LayerTestResult<float, 3> Concatenation3dDim1Test(
3176 armnn::IWorkloadFactory& workloadFactory,
3177 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003178{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003179 return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003180}
3181
// Concatenates three { 2, 3, 2 } tensors along dimension 2 (innermost), giving a
// { 2, 3, 6 } output. 'useSubtensor' is forwarded so the inner-axis case can be
// exercised both with and without backend sub-tensor views.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 3> result =
        Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
    }));

    return result;
}
3217
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003218LayerTestResult<float, 3> Concatenation3dDim2Test(
3219 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003220 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3221 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003222{
narpra015cdda352018-11-19 15:30:27 +00003223 return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003224}
3225
// Concatenates three tensors with DIFFERENT batch sizes ({2,3,2}, {1,3,2},
// {3,3,2}) along dimension 0, producing a { 6, 3, 2 } output, and checks the
// batches appear back-to-back in input order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        25.0f, 26.0f,

        // Batch 0, Channel 1
        27.0f, 28.0f,

        // Batch 0, Channel 2
        29.0f, 30.0f,

        // Batch 1, Channel 0
        13.0f, 14.0f,

        // Batch 1, Channel 1
        15.0f, 16.0f,

        // Batch 1, Channel 2
        17.0f, 18.0f,

        // Batch 2, Channel 0
        31.0f, 32.0f,

        // Batch 2, Channel 1
        33.0f, 34.0f,

        // Batch 2, Channel 2
        35.0f, 36.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 2, Channel 0
        7.0f, 8.0f,

        // Batch 2, Channel 1
        9.0f, 10.0f,

        // Batch 2, Channel 2
        11.0f, 12.0f,

        // Batch 3, Channel 0
        25.0f, 26.0f,

        // Batch 3, Channel 1
        27.0f, 28.0f,

        // Batch 3, Channel 2
        29.0f, 30.0f,

        // Batch 4, Channel 0
        13.0f, 14.0f,

        // Batch 4, Channel 1
        15.0f, 16.0f,

        // Batch 4, Channel 2
        17.0f, 18.0f,

        // Batch 5, Channel 0
        31.0f, 32.0f,

        // Batch 5, Channel 1
        33.0f, 34.0f,

        // Batch 5, Channel 2
        35.0f, 36.0f
    }));

    return result;
}
3368
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003369LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
3370 armnn::IWorkloadFactory& workloadFactory,
3371 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003372{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003373 return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003374}
3375
// Concatenates three tensors with DIFFERENT channel counts ({2,3,2}, {2,4,2},
// {2,1,2}) along dimension 1, producing a { 2, 8, 2 } output; per batch, each
// input's channels are appended in input order.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f, 8.0f,

        // Batch 0, Channel 1
        9.0f, 10.0f,

        // Batch 0, Channel 2
        11.0f, 12.0f,

        // Batch 0, Channel 3
        25.0f, 26.0f,

        // Batch 1, Channel 0
        27.0f, 28.0f,

        // Batch 1, Channel 1
        29.0f, 30.0f,

        // Batch 1, Channel 2
        13.0f, 14.0f,

        // Batch 1, Channel 3
        15.0f, 16.0f,
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        17.0f, 18.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 0, Channel 3
        7.0f, 8.0f,

        // Batch 0, Channel 4
        9.0f, 10.0f,

        // Batch 0, Channel 5
        11.0f, 12.0f,

        // Batch 0, Channel 6
        25.0f, 26.0f,

        // Batch 0, Channel 7
        17.0f, 18.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f,

        // Batch 1, Channel 3
        27.0f, 28.0f,

        // Batch 1, Channel 4
        29.0f, 30.0f,

        // Batch 1, Channel 5
        13.0f, 14.0f,

        // Batch 1, Channel 6
        15.0f, 16.0f,

        // Batch 1, Channel 7
        31.0f, 32.0f,
    }));

    return result;
}
3506
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003507LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
3508 armnn::IWorkloadFactory& workloadFactory,
3509 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003510{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003511 return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003512}
3513
// Concatenates three tensors with DIFFERENT innermost extents ({2,3,2}, {2,3,1},
// {2,3,3}) along dimension 2, producing a { 2, 3, 6 } output; each output row is
// the three inputs' rows joined end-to-end.
template <typename T>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f
    }));

    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        7.0f,

        // Batch 0, Channel 1
        9.0f,

        // Batch 0, Channel 2
        11.0f,

        // Batch 1, Channel 0
        25.0f,

        // Batch 1, Channel 1
        27.0f,

        // Batch 1, Channel 2
        29.0f
    }));

    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        35.0f, 36.0f, 55.0f,
    }));

    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

        // Batch 0, Channel 1
        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

        // Batch 0, Channel 2
        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

        // Batch 1, Channel 0
        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

        // Batch 1, Channel 1
        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

        // Batch 1, Channel 2
        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
    }));

    return result;
}
3621
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003622LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
3623 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00003624 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3625 bool useSubtensor)
3626{
3627 return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
3628}
3629
// Runs a concatenation of three identical { 1, 3, 2, 2 } input tensors along the
// given 'dimension' and returns the raw workload output in 'result.output'.
// Callers are responsible for filling in 'result.outputExpected'.
// Note: input1 deliberately overlaps input0's values (11, 12) so ordering
// mistakes in the backend are still detectable by the expected-value check.
template <typename T>
LayerTestResult<T, 4> Concatenation4dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f
    }));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
3686
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 0 (batch), giving a
// { 3, 3, 2, 2 } output: the three inputs stacked in order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));
    return result;
}
3722
3723LayerTestResult<float, 4> Concatenation4dDim0Test(
3724 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003726{
narpra015cdda352018-11-19 15:30:27 +00003727 return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3728}
3729
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 1 (channels), giving
// a { 1, 9, 2, 2 } output: the inputs' channel blocks appended in order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
3766
3767LayerTestResult<float, 4> Concatenation4dDim1Test(
3768 armnn::IWorkloadFactory& workloadFactory,
3769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3770{
3771 return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3772}
3773
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 2 (height), giving a
// { 1, 3, 6, 2 } output: per channel, the inputs' rows interleave in input order.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
                                                              true, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
3810
3811LayerTestResult<float, 4> Concatenation4dDim2Test(
3812 armnn::IWorkloadFactory& workloadFactory,
3813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3814{
3815 return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3816}
3817
// Concatenates three { 1, 3, 2, 2 } tensors along dimension 3 (innermost/width),
// giving a { 1, 3, 2, 6 } output; 'useSubtensor' toggles the backend sub-tensor
// (view) path for the inner-axis case.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
                                                              useSubtensor, qScale, qOffset);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        3.0f, 4.0f,
        13.0f, 14.0f,
        23.0f, 24.0f,

        5.0f, 6.0f,
        15.0f, 16.0f,
        25.0f, 26.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        27.0f, 28.0f,

        9.0f, 10.0f,
        19.0f, 20.0f,
        29.0f, 30.0f,
        11.0f, 12.0f,
        21.0f, 22.0f,
        31.0f, 32.0f
    }));

    return result;
}
3855
3856LayerTestResult<float, 4> Concatenation4dDim3Test(
3857 armnn::IWorkloadFactory& workloadFactory,
3858 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3859 bool useSubtensor)
3860{
3861 return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
3862}
3863
// Concatenates two tensors with DIFFERENT batch sizes ({1,3,2,2} and {2,3,2,2})
// along dimension 0, producing a { 3, 3, 2, 2 } output: input0's batch followed
// by input1's two batches.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 0;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f

    }));

    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,

        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f,
        29.0f, 30.0f,
        31.0f, 32.0f
    }));

    return result;
}
3943
3944LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
3945 armnn::IWorkloadFactory& workloadFactory,
3946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
3947{
3948 return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
3949}
3950
// Concatenates two tensors with DIFFERENT channel counts ({1,3,2,2} and
// {1,2,2,2}) along dimension 1, producing a { 1, 5, 2, 2 } output: input0's
// three channels followed by input1's two.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 1;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,

    }));

    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f
    }));

    return result;
}
4011
4012LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
4013 armnn::IWorkloadFactory& workloadFactory,
4014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4015{
4016 return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4017}
4018
// Concatenates two tensors with DIFFERENT heights ({1,3,2,2} and {1,3,3,2})
// along dimension 2, producing a { 1, 3, 5, 2 } output: per channel, input0's
// two rows followed by input1's three.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    unsigned int dimension = 2;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        11.0f, 12.0f,
        13.0f, 14.0f,
        15.0f, 16.0f,

        5.0f, 6.0f,
        7.0f, 8.0f,
        17.0f, 18.0f,
        19.0f, 20.0f,
        21.0f, 22.0f,

        9.0f, 10.0f,
        11.0f, 12.0f,
        23.0f, 24.0f,
        25.0f, 26.0f,
        27.0f, 28.0f
    }));

    return result;
}
4090
4091LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
4092 armnn::IWorkloadFactory& workloadFactory,
4093 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4094{
4095 return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
4096}
4097
// Concatenates two tensors with DIFFERENT widths ({1,3,2,2} and {1,3,2,3})
// along dimension 3 (innermost), producing a { 1, 3, 2, 5 } output: each row is
// input0's pair followed by input1's triple. 'useSubtensor' toggles the backend
// sub-tensor (view) path.
template <typename T>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool useSubtensor)
{
    unsigned int dimension = 3;
    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f,
        3.0f, 4.0f,
        5.0f, 6.0f,
        7.0f, 8.0f,
        9.0f, 10.0f,
        11.0f, 12.0f
    }));

    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
        11.0f, 12.0f, 13.0f,
        14.0f, 15.0f, 16.0f,

        17.0f, 18.0f, 19.0f,
        20.0f, 21.0f, 22.0f,

        23.0f, 24.0f, 25.0f,
        26.0f, 27.0f, 28.0f
    }));

    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo0, inputTensorInfo1},
                   {input0.data(), input1.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
    }));

    return result;
}
4158
4159LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
4160 armnn::IWorkloadFactory& workloadFactory,
4161 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4162 bool useSubtensor)
4163{
4164 return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004165}
4166
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004167LayerTestResult<float, 4> ResizeBilinearNopTest(
4168 armnn::IWorkloadFactory& workloadFactory,
4169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004170 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004171{
Nina Drozdd41b2592018-11-19 13:03:36 +00004172 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4173 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004174
James Conroy6b965822018-11-01 11:33:09 +00004175 std::vector<float> inputData({
4176 1.0f, 2.0f, 3.0f, 4.0f,
4177 2.0f, 3.0f, 4.0f, 5.0f,
4178 3.0f, 4.0f, 5.0f, 6.0f,
4179 4.0f, 5.0f, 6.0f, 7.0f,
4180
telsoa014fcda012018-03-09 14:13:49 +00004181 1.0f, 2.0f, 3.0f, 4.0f,
4182 2.0f, 3.0f, 4.0f, 5.0f,
4183 3.0f, 4.0f, 5.0f, 6.0f,
4184 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00004185 });
4186
4187 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004188 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004189 {
4190 std::vector<float> tmp(inputData.size());
4191 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4192 inputData = tmp;
4193 }
4194
4195 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004196
4197 LayerTestResult<float, 4> result(outputTensorInfo);
4198 result.outputExpected = input;
4199
4200 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4201 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4202
4203 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004204 descriptor.m_Parameters.m_DataLayout = dataLayout;
4205 armnn::WorkloadInfo info;
4206 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4207 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4208
4209 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4210
4211 inputHandle->Allocate();
4212 outputHandle->Allocate();
4213 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4214
James Conroy074f3712018-10-03 09:32:03 +01004215 workload->Execute();
4216
4217 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4218 return result;
4219}
4220
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004221LayerTestResult<float, 4> SimpleResizeBilinearTest(
4222 armnn::IWorkloadFactory& workloadFactory,
4223 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004224 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01004225{
Nina Drozdd41b2592018-11-19 13:03:36 +00004226 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
4227 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
James Conroy074f3712018-10-03 09:32:03 +01004228
James Conroy6b965822018-11-01 11:33:09 +00004229 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004230 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00004231 200.0f, 250.0f,
4232
4233 250.0f, 200.0f,
4234 250.0f, 1.0f
4235 });
James Conroy074f3712018-10-03 09:32:03 +01004236
4237 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
4238 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00004239 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
4240 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
4241 // which we would expect if projecting the centre).
4242
4243 std::vector<float> outputData({
4244 1.0f,
4245
4246 250.0f
4247 });
4248
4249 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004250 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004251 {
4252 std::vector<float> tmp(inputData.size());
4253 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4254 inputData = tmp;
4255
4256 std::vector<float> tmp1(outputData.size());
4257 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4258 outputData = tmp1;
4259 }
4260
4261 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4262
James Conroy074f3712018-10-03 09:32:03 +01004263 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004264 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01004265
4266 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4267 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4268
4269 armnn::ResizeBilinearQueueDescriptor descriptor;
4270 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004271 armnn::WorkloadInfo info;
4272 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4273 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4274
4275 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4276
4277 inputHandle->Allocate();
4278 outputHandle->Allocate();
4279 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4280
4281 workload->Execute();
4282
4283 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4284 return result;
4285}
4286
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004287LayerTestResult<float, 4> ResizeBilinearSqMinTest(
4288 armnn::IWorkloadFactory& workloadFactory,
4289 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004290 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004291{
Nina Drozdd41b2592018-11-19 13:03:36 +00004292 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
4293 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004294
James Conroy6b965822018-11-01 11:33:09 +00004295 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004296 1.0f, 2.0f, 3.0f, 4.0f,
4297 2.0f, 3.0f, 4.0f, 5.0f,
4298 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00004299 4.0f, 5.0f, 6.0f, 7.0f,
4300
4301 7.0f, 6.0f, 5.0f, 4.0f,
4302 6.0f, 5.0f, 4.0f, 3.0f,
4303 5.0f, 4.0f, 3.0f, 2.0f,
4304 4.0f, 3.0f, 2.0f, 1.0f
4305 });
4306
4307 std::vector<float> outputData({
4308 1.0f, 3.0f,
4309 3.0f, 5.0f,
4310
4311 7.0f, 5.0f,
4312 5.0f, 3.0f
4313 });
4314
4315 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004316 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004317 {
4318 std::vector<float> tmp(inputData.size());
4319 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4320 inputData = tmp;
4321
4322 std::vector<float> tmp1(outputData.size());
4323 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4324 outputData = tmp1;
4325 }
4326
4327 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004328
telsoa014fcda012018-03-09 14:13:49 +00004329 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004330 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004331
4332 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4333 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4334
4335 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004336 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004337 armnn::WorkloadInfo info;
4338 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4339 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4340
4341 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4342
4343 inputHandle->Allocate();
4344 outputHandle->Allocate();
4345 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4346
4347 workload->Execute();
4348
4349 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4350 return result;
4351}
4352
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004353LayerTestResult<float, 4> ResizeBilinearMinTest(
4354 armnn::IWorkloadFactory& workloadFactory,
4355 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004356 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004357{
Nina Drozdd41b2592018-11-19 13:03:36 +00004358 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
4359 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004360
James Conroy6b965822018-11-01 11:33:09 +00004361 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004362 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
4363 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
James Conroy6b965822018-11-01 11:33:09 +00004364 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
4365
4366 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
4367 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
4368 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
4369 });
4370
4371 std::vector<float> outputData({
4372 1.0f, 2.6666f, 6.00f,
4373 78.5f, 179.3333f, 401.00f,
4374
4375 987.0f, 454.6670f, 203.33f,
4376 48.5f, 22.3333f, 10.00f
4377 });
4378
4379 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004380 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004381 {
4382 std::vector<float> tmp(inputData.size());
4383 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4384 inputData = tmp;
4385
4386 std::vector<float> tmp1(outputData.size());
4387 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4388 outputData = tmp1;
4389 }
4390
4391 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00004392
4393 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00004394 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004395
4396 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4397 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4398
4399 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004400 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004401 armnn::WorkloadInfo info;
4402 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4403 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4404
4405 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4406
4407 inputHandle->Allocate();
4408 outputHandle->Allocate();
4409 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4410
4411 workload->Execute();
4412
4413 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4414 return result;
4415}
4416
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004417LayerTestResult<float, 4> ResizeBilinearMagTest(
4418 armnn::IWorkloadFactory& workloadFactory,
4419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00004420 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00004421{
Nina Drozdd41b2592018-11-19 13:03:36 +00004422 const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
4423 const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00004424
James Conroy6b965822018-11-01 11:33:09 +00004425 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01004426 1.0f, 2.0f,
4427 13.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004428 144.0f, 233.0f,
telsoa014fcda012018-03-09 14:13:49 +00004429
James Conroy6b965822018-11-01 11:33:09 +00004430 233.0f, 144.0f,
4431 21.0f, 13.0f,
4432 2.0f, 1.0f
4433 });
4434
4435 std::vector<float> outputData({
James Conroy074f3712018-10-03 09:32:03 +01004436 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
4437 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
James Conroy6b965822018-11-01 11:33:09 +00004438 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
4439
4440 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
4441 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
4442 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
4443 });
4444
4445 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00004446 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00004447 {
4448 std::vector<float> tmp(inputData.size());
4449 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4450 inputData = tmp;
4451
4452 std::vector<float> tmp1(outputData.size());
4453 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
4454 outputData = tmp1;
4455 }
4456
4457 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
4458
4459 LayerTestResult<float, 4> result(outputTensorInfo);
4460 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00004461
4462 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4463 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4464
4465 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01004466 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00004467 armnn::WorkloadInfo info;
4468 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4469 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4470
4471 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
4472
4473 inputHandle->Allocate();
4474 outputHandle->Allocate();
4475 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
4476
4477 workload->Execute();
4478
4479 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4480 return result;
4481}
4482
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004483LayerTestResult<float, 2> FakeQuantizationTest(
4484 armnn::IWorkloadFactory& workloadFactory,
4485 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004486{
4487 constexpr unsigned int width = 2;
4488 constexpr unsigned int height = 3;
4489
4490 const armnn::TensorInfo tensorInfo({height, width },
4491 armnn::DataType::Float32);
4492 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4493 -10.0f, -5.0f,
4494 0.0f, 5.0f,
4495 10.0f, 10.0f
4496 }));
4497
4498 LayerTestResult<float, 2> ret(tensorInfo);
4499
4500 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4501
4502 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
4503
4504 armnn::FakeQuantizationQueueDescriptor data;
4505 armnn::WorkloadInfo info;
4506
4507 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
4508 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
4509 float min = -10.f;
4510 float max = 10.f;
4511
4512 data.m_Parameters.m_Min = min;
4513 data.m_Parameters.m_Max = max;
4514
4515 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
4516 armnn::FakeQuantizationQueueDescriptor refData = data;
4517 armnn::WorkloadInfo refInfo = info;
4518 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
4519
4520 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
4521
4522 inputHandle->Allocate();
4523 outputHandle->Allocate();
4524
4525 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
4526
4527 workload->Execute();
4528
4529 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
4530
4531 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
4532 0.0f, 63.0f,
4533 128.0f, 191.0f,
4534 255.0f, 255.0f
4535 }));
4536 return ret;
4537}
4538
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004539namespace
4540{
4541
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004542LayerTestResult<float, 4> L2NormalizationTestImpl(
4543 armnn::IWorkloadFactory& workloadFactory,
4544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4545 const armnn::TensorShape& inputOutputTensorShape,
4546 const std::vector<float>& inputValues,
4547 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00004548 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004549{
4550 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4551 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
4552
jimfly013aab7c32018-11-12 13:32:08 +00004553 // at this point if we require it permute the input data
4554 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
4555 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004556 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004557 {
4558 std::vector<float> tmp(inputData.size());
4559 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
4560 inputData = tmp;
4561 }
4562
4563 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004564
4565 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00004566 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00004567 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00004568 {
4569 std::vector<float> tmp(expectedOutputData.size());
4570 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data());
4571 expectedOutputData = tmp;
4572 }
4573 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004574
4575 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4576 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4577
4578 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00004579 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004580 armnn::WorkloadInfo info;
4581
4582 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4583 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4584
4585 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
4586
4587 inputHandle->Allocate();
4588 outputHandle->Allocate();
4589
4590 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4591
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004592 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01004593
4594 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4595
4596 return result;
4597}
4598
4599float CalcInvL2Norm(std::initializer_list<float> elements)
4600{
4601 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
4602 [](float acc, float element) { return acc + element * element; });
4603 return 1.0f / sqrtf(reduction);
4604}
4605
4606} // anonymous namespace
4607
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004608template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004609LayerTestResult<T, 2> Pad2dTestCommon(
4610 armnn::IWorkloadFactory& workloadFactory,
4611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4612 float qScale,
4613 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004614{
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004615 const armnn::TensorShape inputShape{ 3, 3 };
4616 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004617
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004618 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4619 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004620
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004621 std::vector<T> inputValues(
4622 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004623 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004624 // Height (3) x Width (3)
4625 4, 8, 6,
4626 7, 4, 4,
4627 3, 2, 4
4628 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004629
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004630 std::vector<T> expectedOutputValues(
4631 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004632 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004633 0, 0, 0, 0, 0, 0, 0,
4634 0, 0, 0, 0, 0, 0, 0,
4635 0, 0, 4, 8, 6, 0, 0,
4636 0, 0, 7, 4, 4, 0, 0,
4637 0, 0, 3, 2, 4, 0, 0,
4638 0, 0, 0, 0, 0, 0, 0,
4639 0, 0, 0, 0, 0, 0, 0
4640 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004641
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004642 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004643
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004644 LayerTestResult<T, 2> result(outputTensorInfo);
4645 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004646
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004647 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4648 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004649
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004650 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004651
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004652 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4653 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
4654 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004655
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004656 descriptor.m_Parameters.m_PadList = PadList;
4657 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004658
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004659 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4660 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004661
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004662 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004663
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004664 inputHandle->Allocate();
4665 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004666
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004667 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004668
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004669 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004670
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004671 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004672
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004673 return result;
4674}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004675
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004676template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004677LayerTestResult<T, 3> Pad3dTestCommon(
4678 armnn::IWorkloadFactory& workloadFactory,
4679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4680 float qScale,
4681 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004682{
4683 const armnn::TensorShape inputShape{ 2, 2, 2 };
4684 const armnn::TensorShape outputShape{ 3, 5, 6 };
4685
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004686 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4687 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004688
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004689 std::vector<T> inputValues(
4690 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004691 {
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004692 // Channel 0, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004693 0, 4,
4694 2, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004695
4696 // Channel 1, Height (2) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004697 6, 1,
4698 5, 2
4699 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004700
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004701 std::vector<T> expectedOutputValues(
4702 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004703 {
4704
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004705 0, 0, 0, 0, 0, 0,
4706 0, 0, 0, 0, 0, 0,
4707 0, 0, 0, 4, 0, 0,
4708 0, 0, 2, 5, 0, 0,
4709 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004710
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004711 0, 0, 0, 0, 0, 0,
4712 0, 0, 0, 0, 0, 0,
4713 0, 0, 6, 1, 0, 0,
4714 0, 0, 5, 2, 0, 0,
4715 0, 0, 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004716
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004717 0, 0, 0, 0, 0, 0,
4718 0, 0, 0, 0, 0, 0,
4719 0, 0, 0, 0, 0, 0,
4720 0, 0, 0, 0, 0, 0,
4721 0, 0, 0, 0, 0, 0
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004722
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004723 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004724
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004725 auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004726
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004727 LayerTestResult<T, 3> result(outputTensorInfo);
4728 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004729
4730 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4731 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4732
4733 armnn::PadQueueDescriptor descriptor;
4734
4735 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4736 PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
4737 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
4738 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
4739
4740 descriptor.m_Parameters.m_PadList = PadList;
4741 armnn::WorkloadInfo info;
4742
4743 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4744 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4745
4746 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
4747
4748 inputHandle->Allocate();
4749 outputHandle->Allocate();
4750
4751 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
4752
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004753 workload->Execute();
4754
4755 CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
4756
4757 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004758}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004759
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004760template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004761LayerTestResult<T, 4> Pad4dTestCommon(
4762 armnn::IWorkloadFactory& workloadFactory,
4763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4764 float qScale,
4765 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004766{
4767 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
4768 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
4769
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004770 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
4771 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004772
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004773 std::vector<T> inputValues(
4774 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004775 {
4776 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004777 0, 1,
4778 2, 3,
4779 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004780
4781 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004782 6, 7,
4783 8, 9,
4784 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004785
4786 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004787 12, 13,
4788 14, 15,
4789 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004790
4791 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004792 18, 19,
4793 20, 21,
4794 22, 23
4795 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004796
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004797 std::vector<T> expectedOutputValues(
4798 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004799 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004800 0, 0, 0, 0,
4801 0, 0, 0, 0,
4802 0, 0, 0, 0,
4803 0, 0, 0, 0,
4804 0, 0, 0, 0,
4805 0, 0, 0, 0,
4806 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004807
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004808 0, 0, 0, 0,
4809 0, 0, 0, 0,
4810 0, 0, 0, 0,
4811 0, 0, 0, 0,
4812 0, 0, 0, 0,
4813 0, 0, 0, 0,
4814 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004815
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004816 0, 0, 0, 0,
4817 0, 0, 0, 0,
4818 0, 0, 0, 0,
4819 0, 0, 0, 0,
4820 0, 0, 0, 0,
4821 0, 0, 0, 0,
4822 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004823
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004824 0, 0, 0, 0,
4825 0, 0, 0, 0,
4826 0, 0, 0, 0,
4827 0, 0, 0, 0,
4828 0, 0, 0, 0,
4829 0, 0, 0, 0,
4830 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004831
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004832 0, 0, 0, 0,
4833 0, 0, 0, 0,
4834 0, 0, 0, 0,
4835 0, 0, 0, 0,
4836 0, 0, 0, 0,
4837 0, 0, 0, 0,
4838 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004839
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004840 0, 0, 0, 0,
4841 0, 0, 0, 0,
4842 0, 0, 0, 0,
4843 0, 0, 0, 0,
4844 0, 0, 0, 0,
4845 0, 0, 0, 0,
4846 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004847
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004848 0, 0, 0, 0,
4849 0, 0, 0, 0,
4850 0, 0, 0, 0,
4851 0, 0, 0, 0,
4852 0, 0, 0, 0,
4853 0, 0, 0, 0,
4854 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004855
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004856 0, 0, 0, 0,
4857 0, 0, 0, 0,
4858 0, 0, 0, 0,
4859 0, 0, 1, 0,
4860 0, 2, 3, 0,
4861 0, 4, 5, 0,
4862 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004863
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004864 0, 0, 0, 0,
4865 0, 0, 0, 0,
4866 0, 0, 0, 0,
4867 0, 6, 7, 0,
4868 0, 8, 9, 0,
4869 0, 10, 11, 0,
4870 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004871
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004872 0, 0, 0, 0,
4873 0, 0, 0, 0,
4874 0, 0, 0, 0,
4875 0, 0, 0, 0,
4876 0, 0, 0, 0,
4877 0, 0, 0, 0,
4878 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004879
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004880 0, 0, 0, 0,
4881 0, 0, 0, 0,
4882 0, 0, 0, 0,
4883 0, 0, 0, 0,
4884 0, 0, 0, 0,
4885 0, 0, 0, 0,
4886 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004887
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004888 0, 0, 0, 0,
4889 0, 0, 0, 0,
4890 0, 0, 0, 0,
4891 0, 0, 0, 0,
4892 0, 0, 0, 0,
4893 0, 0, 0, 0,
4894 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004895
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004896 0, 0, 0, 0,
4897 0, 0, 0, 0,
4898 0, 0, 0, 0,
4899 0, 12, 13, 0,
4900 0, 14, 15, 0,
4901 0, 16, 17, 0,
4902 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004903
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004904 0, 0, 0, 0,
4905 0, 0, 0, 0,
4906 0, 0, 0, 0,
4907 0, 18, 19, 0,
4908 0, 20, 21, 0,
4909 0, 22, 23, 0,
4910 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004911
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004912 0, 0, 0, 0,
4913 0, 0, 0, 0,
4914 0, 0, 0, 0,
4915 0, 0, 0, 0,
4916 0, 0, 0, 0,
4917 0, 0, 0, 0,
4918 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004919
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004920 0, 0, 0, 0,
4921 0, 0, 0, 0,
4922 0, 0, 0, 0,
4923 0, 0, 0, 0,
4924 0, 0, 0, 0,
4925 0, 0, 0, 0,
4926 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004927
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004928 0, 0, 0, 0,
4929 0, 0, 0, 0,
4930 0, 0, 0, 0,
4931 0, 0, 0, 0,
4932 0, 0, 0, 0,
4933 0, 0, 0, 0,
4934 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004935
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004936 0, 0, 0, 0,
4937 0, 0, 0, 0,
4938 0, 0, 0, 0,
4939 0, 0, 0, 0,
4940 0, 0, 0, 0,
4941 0, 0, 0, 0,
4942 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004943
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004944 0, 0, 0, 0,
4945 0, 0, 0, 0,
4946 0, 0, 0, 0,
4947 0, 0, 0, 0,
4948 0, 0, 0, 0,
4949 0, 0, 0, 0,
4950 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004951
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004952 0, 0, 0, 0,
4953 0, 0, 0, 0,
4954 0, 0, 0, 0,
4955 0, 0, 0, 0,
4956 0, 0, 0, 0,
4957 0, 0, 0, 0,
4958 0, 0, 0, 0
4959 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004960
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004961 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004962
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004963 LayerTestResult<T, 4> result(outputTensorInfo);
4964 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004965
4966 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
4967 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
4968
4969 armnn::PadQueueDescriptor descriptor;
4970
4971 std::vector<std::pair<unsigned int, unsigned int>> PadList;
4972 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4973 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
4974 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
4975 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
4976
4977 descriptor.m_Parameters.m_PadList = PadList;
4978 armnn::WorkloadInfo info;
4979
4980 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
4981 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
4982
4983 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
4984
4985 inputHandle->Allocate();
4986 outputHandle->Allocate();
4987
4988 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
4989
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01004990 workload->Execute();
4991
4992 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
4993
4994 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01004995}
4996
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004997LayerTestResult<uint8_t, 2> PadUint82dTest(
4998 armnn::IWorkloadFactory& workloadFactory,
4999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005000{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005001 return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005002}
5003
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005004LayerTestResult<uint8_t, 3> PadUint83dTest(
5005 armnn::IWorkloadFactory& workloadFactory,
5006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005007{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005008 return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005009}
5010
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005011LayerTestResult<uint8_t, 4> PadUint84dTest(
5012 armnn::IWorkloadFactory& workloadFactory,
5013 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005014{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005015 return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005016}
5017
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005018LayerTestResult<float, 2> PadFloat322dTest(
5019 armnn::IWorkloadFactory& workloadFactory,
5020 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005021{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005022 return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005023}
5024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005025LayerTestResult<float, 3> PadFloat323dTest(
5026 armnn::IWorkloadFactory& workloadFactory,
5027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005028{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005029 return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005030}
5031
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005032LayerTestResult<float, 4> PadFloat324dTest(
5033 armnn::IWorkloadFactory& workloadFactory,
5034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005035{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005036 return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005037}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005038
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005039LayerTestResult<float, 4> L2Normalization1dTest(
5040 armnn::IWorkloadFactory& workloadFactory,
5041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005042 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005043{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005044 // Width: 1
5045 // Height: 1
5046 // Channels: 10
5047 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005048 unsigned int numberOfBatches = 1;
5049 unsigned int numberOfChannels = 10;
5050 unsigned int height = 1;
5051 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00005052
jimfly013aab7c32018-11-12 13:32:08 +00005053
Nina Drozdd41b2592018-11-19 13:03:36 +00005054 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005055 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005056 std::vector<float> inputValues
5057 {
5058 // Batch 0, Channel 0, Height (1) x Width (1)
5059 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00005060
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005061 // Batch 0, Channel 1, Height (1) x Width (1)
5062 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00005063
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005064 // Batch 0, Channel 2, Height (1) x Width (1)
5065 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00005066
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005067 // Batch 0, Channel 3, Height (1) x Width (1)
5068 4.0f,
5069
5070 // Batch 0, Channel 4, Height (1) x Width (1)
5071 5.0f,
5072
5073 // Batch 0, Channel 5, Height (1) x Width (1)
5074 6.0f,
5075
5076 // Batch 0, Channel 6, Height (1) x Width (1)
5077 7.0f,
5078
5079 // Batch 0, Channel 7, Height (1) x Width (1)
5080 8.0f,
5081
5082 // Batch 0, Channel 8, Height (1) x Width (1)
5083 9.0f,
5084
5085 // Batch 0, Channel 9, Height (1) x Width (1)
5086 10.0f
5087 };
telsoa014fcda012018-03-09 14:13:49 +00005088 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005089 std::vector<float> expectedOutputValues
5090 {
5091 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00005092 1.0f * approxInvL2Norm,
5093 2.0f * approxInvL2Norm,
5094 3.0f * approxInvL2Norm,
5095 4.0f * approxInvL2Norm,
5096 5.0f * approxInvL2Norm,
5097 6.0f * approxInvL2Norm,
5098 7.0f * approxInvL2Norm,
5099 8.0f * approxInvL2Norm,
5100 9.0f * approxInvL2Norm,
5101 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005102 };
telsoa014fcda012018-03-09 14:13:49 +00005103
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005104
5105 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005106 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005107}
5108
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005109LayerTestResult<float, 4> L2Normalization2dTest(
5110 armnn::IWorkloadFactory& workloadFactory,
5111 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005112 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005113{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005114 // Width: 5
5115 // Height: 1
5116 // Channels: 2
5117 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005118 unsigned int numberOfBatches = 1;
5119 unsigned int numberOfChannels = 2;
5120 unsigned int height = 1;
5121 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00005122
Nina Drozdd41b2592018-11-19 13:03:36 +00005123 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005124 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005125 std::vector<float> inputValues
5126 {
5127 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00005128 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00005129
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005130 // Batch 0, Channel 1, Height (1) x Width (5)
5131 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
5132 };
5133 std::vector<float> expectedOutputValues
5134 {
5135 // Batch 0, Channel 0, Height (1) x Width (5)
5136 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5137 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5138 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5139 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005140 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
5141
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005142 // Batch 0, Channel 1, Height (1) x Width (5)
5143 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
5144 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
5145 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
5146 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00005147 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005148 };
telsoa014fcda012018-03-09 14:13:49 +00005149
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005150 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005151 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005152}
telsoa014fcda012018-03-09 14:13:49 +00005153
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005154LayerTestResult<float, 4> L2Normalization3dTest(
5155 armnn::IWorkloadFactory& workloadFactory,
5156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005157 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005158{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005159 // Width: 3
5160 // Height: 4
5161 // Channels: 2
5162 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00005163 unsigned int numberOfBatches = 1;
5164 unsigned int numberOfChannels = 2;
5165 unsigned int height = 4;
5166 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005167
Nina Drozdd41b2592018-11-19 13:03:36 +00005168 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005169 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005170 std::vector<float> inputValues
5171 {
5172 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005173 119.0f, 21.0f, 150.0f,
5174 149.0f, 32.0f, 179.0f,
5175 15.0f, 227.0f, 141.0f,
5176 147.0f, 199.0f, 220.0f,
5177
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005178 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005179 110.0f, 140.0f, 73.0f,
5180 211.0f, 212.0f, 89.0f,
5181 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005182 162.0f, 12.0f, 161.0f
5183 };
5184 std::vector<float> expectedOutputValues
5185 {
5186 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005187 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5188 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5189 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5190 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5191 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5192 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5193 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5194 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5195 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5196 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5197 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
5198 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
5199
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005200 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005201 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
5202 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
5203 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
5204 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
5205 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
5206 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
5207 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
5208 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
5209 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
5210 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
5211 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005212 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
5213 };
telsoa014fcda012018-03-09 14:13:49 +00005214
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005215 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005216 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005217}
telsoa014fcda012018-03-09 14:13:49 +00005218
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005219LayerTestResult<float, 4> L2Normalization4dTest(
5220 armnn::IWorkloadFactory& workloadFactory,
5221 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005222 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00005223{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005224 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005225 // Height: 4
5226 // Channels: 3
5227 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00005228 unsigned int numberOfBatches = 2;
5229 unsigned int numberOfChannels = 3;
5230 unsigned int height = 4;
5231 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00005232
Nina Drozdd41b2592018-11-19 13:03:36 +00005233 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00005234 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005235 std::vector<float> inputValues
5236 {
5237 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005238 235.0f, 46.0f, 178.0f,
5239 100.0f, 123.0f, 19.0f,
5240 172.0f, 74.0f, 250.0f,
5241 6.0f, 195.0f, 80.0f,
5242
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005243 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005244 113.0f, 95.0f, 202.0f,
5245 77.0f, 114.0f, 71.0f,
5246 122.0f, 246.0f, 166.0f,
5247 82.0f, 28.0f, 37.0f,
5248
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005249 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005250 56.0f, 170.0f, 162.0f,
5251 194.0f, 89.0f, 254.0f,
5252 12.0f, 209.0f, 200.0f,
5253 1.0f, 64.0f, 54.0f,
5254
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005255 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005256 67.0f, 90.0f, 49.0f,
5257 7.0f, 163.0f, 18.0f,
5258 25.0f, 117.0f, 103.0f,
5259 247.0f, 59.0f, 189.0f,
5260
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005261 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005262 239.0f, 104.0f, 199.0f,
5263 17.0f, 124.0f, 153.0f,
5264 222.0f, 217.0f, 75.0f,
5265 32.0f, 126.0f, 21.0f,
5266
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005267 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005268 97.0f, 145.0f, 215.0f,
5269 115.0f, 116.0f, 238.0f,
5270 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005271 92.0f, 125.0f, 88.0f
5272 };
5273 std::vector<float> expectedOutputValues
5274 {
5275 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005276 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5277 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5278 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5279 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5280 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5281 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5282 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5283 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5284 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5285 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5286 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5287 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5288
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005289 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005290 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5291 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5292 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5293 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5294 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5295 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5296 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5297 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5298 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5299 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5300 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5301 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5302
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005303 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005304 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
5305 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
5306 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
5307 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
5308 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
5309 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
5310 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
5311 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
5312 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
5313 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
5314 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
5315 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
5316
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005317 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005318 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5319 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5320 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5321 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5322 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5323 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5324 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5325 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5326 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5327 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5328 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5329 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5330
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005331 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005332 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5333 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5334 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5335 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5336 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5337 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5338 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5339 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5340 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5341 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5342 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
5343 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
5344
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005345 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00005346 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
5347 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
5348 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
5349 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
5350 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
5351 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
5352 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
5353 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
5354 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
5355 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
5356 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005357 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
5358 };
telsoa014fcda012018-03-09 14:13:49 +00005359
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005360 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00005361 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00005362}
5363
// Builds a Constant-layer workload whose fixed output is the 2x3x4x3 tensor below,
// executes it, and expects the workload to reproduce that tensor exactly
// (result.outputExpected is set to the constant input itself).
// qScale/qOffset are applied to both tensor infos only when T is a quantized type.
// NOTE: memoryManager is accepted for signature uniformity with the other layer
// tests in this file and is not used in this body.
template <typename T>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer's output matches its stored tensor, so output dims mirror input dims.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
        armnn::GetDataType<T>());

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
        armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // The constant payload, quantized element-wise when T is a quantized type.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    // A Constant workload should emit exactly the tensor it stores.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is handed to the workload through a scoped CPU tensor handle
    // owned by this scope; m_LayerOutput below points at it.
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // A Constant layer has no inputs, so only an output is added to the workload.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5458
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005459LayerTestResult<float, 4> ConstantTest(
5460 armnn::IWorkloadFactory& workloadFactory,
5461 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005462{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005463 return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005464}
5465
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005466LayerTestResult<uint8_t, 4> ConstantTestUint8(
5467 armnn::IWorkloadFactory& workloadFactory,
5468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005469{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005470 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00005471}
5472
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005473LayerTestResult<uint8_t, 3> MergerUint8Test(
5474 armnn::IWorkloadFactory& workloadFactory,
5475 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005476{
surmeh013537c2c2018-05-18 16:31:43 +01005477 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00005478 unsigned int outputHeight = 6;
5479 unsigned int outputChannels = 3;
5480
surmeh013537c2c2018-05-18 16:31:43 +01005481 unsigned int inputWidth1 = 3;
5482 unsigned int inputHeight1 = 6;
5483 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00005484
surmeh013537c2c2018-05-18 16:31:43 +01005485 unsigned int inputWidth2 = 3;
5486 unsigned int inputHeight2 = 6;
5487 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00005488
telsoa01c577f2c2018-08-31 09:22:23 +01005489 // Defines the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00005490 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
5491 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
5492 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00005493
telsoa01c577f2c2018-08-31 09:22:23 +01005494 // Arbitrary scale and offsets. They don't really matter as the merger operator doesn't dequantize/quantize them.
telsoa014fcda012018-03-09 14:13:49 +00005495 const float scale = 0.13497836f;
5496 const int32_t offset = -7;
5497
5498 outputTensorInfo.SetQuantizationScale(scale);
5499 outputTensorInfo.SetQuantizationOffset(offset);
5500 inputTensorInfo1.SetQuantizationScale(scale);
5501 inputTensorInfo1.SetQuantizationOffset(offset);
5502 inputTensorInfo2.SetQuantizationScale(scale);
5503 inputTensorInfo2.SetQuantizationOffset(offset);
telsoa014fcda012018-03-09 14:13:49 +00005504
5505 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
5506
5507 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
surmeh013537c2c2018-05-18 16:31:43 +01005508 {
5509 1, 2, 3,
5510 4, 5, 6,
5511 7, 8, 9,
5512 10, 11, 12,
5513 13, 14, 15,
5514 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00005515
surmeh013537c2c2018-05-18 16:31:43 +01005516 19, 20, 21,
5517 22, 23, 24,
5518 25, 26, 27,
5519 28, 29, 30,
5520 31, 32, 33,
5521 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00005522
surmeh013537c2c2018-05-18 16:31:43 +01005523 37, 38, 39,
5524 40, 41, 42,
5525 43, 44, 45,
5526 46, 47, 48,
5527 49, 50, 51,
5528 52, 53, 54,
5529 })
telsoa014fcda012018-03-09 14:13:49 +00005530 );
5531
telsoa014fcda012018-03-09 14:13:49 +00005532 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
5533 {
surmeh013537c2c2018-05-18 16:31:43 +01005534 1, 2, 3,
5535 4, 5, 6,
5536 7, 8, 9,
5537 10, 11, 12,
5538 13, 14, 15,
5539 16, 17, 18,
telsoa014fcda012018-03-09 14:13:49 +00005540
surmeh013537c2c2018-05-18 16:31:43 +01005541 19, 20, 21,
5542 22, 23, 24,
5543 25, 26, 27,
5544 28, 29, 30,
5545 31, 32, 33,
5546 34, 35, 36,
telsoa014fcda012018-03-09 14:13:49 +00005547 })
5548 );
5549
5550 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
5551 {
surmeh013537c2c2018-05-18 16:31:43 +01005552 37, 38, 39,
5553 40, 41, 42,
telsoa014fcda012018-03-09 14:13:49 +00005554 43, 44, 45,
surmeh013537c2c2018-05-18 16:31:43 +01005555 46, 47, 48,
5556 49, 50, 51,
5557 52, 53, 54,
telsoa014fcda012018-03-09 14:13:49 +00005558 })
5559 );
5560
telsoa01c577f2c2018-08-31 09:22:23 +01005561 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
telsoa014fcda012018-03-09 14:13:49 +00005562 armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
5563
telsoa01c577f2c2018-08-31 09:22:23 +01005564 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
telsoa014fcda012018-03-09 14:13:49 +00005565 armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
5566
telsoa014fcda012018-03-09 14:13:49 +00005567
5568 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5569
5570 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
5571
5572 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
5573 subTensorsSupported ?
5574 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
5575 workloadFactory.CreateTensorHandle(inputTensorInfo1);
5576
5577 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
5578 subTensorsSupported ?
5579 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
5580 workloadFactory.CreateTensorHandle(inputTensorInfo2);
5581
telsoa014fcda012018-03-09 14:13:49 +00005582
5583 armnn::MergerQueueDescriptor data;
5584 armnn::WorkloadInfo info;
5585 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
5586 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00005587 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5588
5589 data.m_ViewOrigins.push_back(window1);
5590 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00005591
5592 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
5593
5594 inputHandle1->Allocate();
5595 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00005596 outputHandle->Allocate();
5597
5598 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
5599 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00005600
5601 workload->Execute();
5602
5603 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
5604
5605 return ret;
5606}
5607
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005608LayerTestResult<uint8_t, 4> AdditionUint8Test(
5609 armnn::IWorkloadFactory& workloadFactory,
5610 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005611{
5612 unsigned int batchSize = 1;
5613 unsigned int channels = 2;
5614 unsigned int height = 2;
5615 unsigned int width = 3;
5616
5617 const float scale = 7.0f;
5618 const int32_t offset = 3;
5619
5620 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
5621 armnn::TensorInfo outputTensorInfo;
5622
5623 const unsigned int shape[] = { batchSize, channels, height, width };
5624 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5625 inputTensorInfo1.SetQuantizationScale(scale);
5626 inputTensorInfo1.SetQuantizationOffset(offset);
5627
5628 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5629 inputTensorInfo2.SetQuantizationScale(scale);
5630 inputTensorInfo2.SetQuantizationOffset(offset);
5631
5632 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
5633 outputTensorInfo.SetQuantizationScale(scale);
5634 outputTensorInfo.SetQuantizationOffset(offset);
5635
telsoa01c577f2c2018-08-31 09:22:23 +01005636 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005637 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5638 {
5639 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
5640 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
5641 }));
5642
telsoa01c577f2c2018-08-31 09:22:23 +01005643 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005644 auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
5645 {
5646 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
5647 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
5648 }));
5649
telsoa01c577f2c2018-08-31 09:22:23 +01005650 // See dequantized values to the right.
telsoa014fcda012018-03-09 14:13:49 +00005651 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
5652 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
5653 {
5654 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
5655 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
5656 }));
5657
5658 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
5659 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
5660 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5661
5662 armnn::AdditionQueueDescriptor data;
5663 armnn::WorkloadInfo info;
5664 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
5665 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
5666 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5667
5668 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
5669
5670 inputHandle1->Allocate();
5671 inputHandle2->Allocate();
5672 outputHandle->Allocate();
5673
5674 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
5675 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
5676
5677 workload->Execute();
5678
5679 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5680
5681 return result;
5682}
5683
surmeh01bceff2f2018-03-29 16:29:27 +01005684namespace
telsoa014fcda012018-03-09 14:13:49 +00005685{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005686LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(
5687 armnn::IWorkloadFactory& workloadFactory,
5688 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5689 const unsigned int shape0[4],
5690 const std::vector<uint8_t> & values0,
5691 float scale0,
5692 int32_t offset0,
5693 const unsigned int shape1[4],
5694 const std::vector<uint8_t> & values1,
5695 float scale1,
5696 int32_t offset1,
5697 const unsigned int outShape[4],
5698 const std::vector<uint8_t> & outValues,
5699 float outScale,
5700 int32_t outOffset)
surmeh01bceff2f2018-03-29 16:29:27 +01005701{
5702 armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
5703 armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
5704 armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
telsoa014fcda012018-03-09 14:13:49 +00005705
surmeh01bceff2f2018-03-29 16:29:27 +01005706 inputTensorInfo0.SetQuantizationScale(scale0);
5707 inputTensorInfo0.SetQuantizationOffset(offset0);
telsoa014fcda012018-03-09 14:13:49 +00005708
surmeh01bceff2f2018-03-29 16:29:27 +01005709 inputTensorInfo1.SetQuantizationScale(scale1);
5710 inputTensorInfo1.SetQuantizationOffset(offset1);
telsoa014fcda012018-03-09 14:13:49 +00005711
surmeh01bceff2f2018-03-29 16:29:27 +01005712 outputTensorInfo.SetQuantizationScale(outScale);
5713 outputTensorInfo.SetQuantizationOffset(outOffset);
telsoa014fcda012018-03-09 14:13:49 +00005714
surmeh01bceff2f2018-03-29 16:29:27 +01005715 auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
5716 auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
telsoa014fcda012018-03-09 14:13:49 +00005717
telsoa014fcda012018-03-09 14:13:49 +00005718 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
surmeh01bceff2f2018-03-29 16:29:27 +01005719 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
telsoa014fcda012018-03-09 14:13:49 +00005720
surmeh01bceff2f2018-03-29 16:29:27 +01005721 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
telsoa014fcda012018-03-09 14:13:49 +00005722 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
telsoa014fcda012018-03-09 14:13:49 +00005723 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5724
5725 armnn::MultiplicationQueueDescriptor data;
5726 armnn::WorkloadInfo info;
surmeh01bceff2f2018-03-29 16:29:27 +01005727 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
5728 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
telsoa014fcda012018-03-09 14:13:49 +00005729 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5730
5731 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
5732
surmeh01bceff2f2018-03-29 16:29:27 +01005733 inputHandle0->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00005734 inputHandle1->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00005735 outputHandle->Allocate();
5736
surmeh01bceff2f2018-03-29 16:29:27 +01005737 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00005738 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00005739
5740 workload->Execute();
5741
5742 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5743
5744 return result;
5745}
surmeh01bceff2f2018-03-29 16:29:27 +01005746} // anonymous namespace
5747
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005748LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
5749 armnn::IWorkloadFactory& workloadFactory,
5750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01005751{
5752 unsigned int batchSize = 1;
5753 unsigned int channels = 2;
5754 unsigned int height = 2;
5755 unsigned int width = 3;
5756 const unsigned int shape[] = { batchSize, channels, height, width };
5757
telsoa01c577f2c2018-08-31 09:22:23 +01005758 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005759 std::vector<uint8_t> input0({
5760 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
5761 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
5762 });
5763
telsoa01c577f2c2018-08-31 09:22:23 +01005764 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005765 std::vector<uint8_t> input1({
5766 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
5767 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
5768 });
5769
telsoa01c577f2c2018-08-31 09:22:23 +01005770 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01005771 std::vector<uint8_t> output(
5772 {
5773 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
5774 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
5775 });
5776
5777 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005778 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01005779 shape,
5780 input0,
5781 4.0f,
5782 1,
5783 shape,
5784 input1,
5785 3.0f,
5786 -2,
5787 shape,
5788 output,
telsoa01c577f2c2018-08-31 09:22:23 +01005789 1366.255f, // Scale/offset chosen to have output values out of range.
surmeh01bceff2f2018-03-29 16:29:27 +01005790 -5);
5791}
5792
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005793LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
5794 armnn::IWorkloadFactory& workloadFactory,
5795 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01005796{
5797 const unsigned int shape0[] = { 1, 2, 2, 3 };
5798 const unsigned int shape1[] = { 1, 1, 1, 1 };
5799
5800 std::vector<uint8_t> input0({
5801 1, 2, 3, 4, 5, 6,
5802 7, 8, 9, 10, 11, 12
5803 });
5804
5805 std::vector<uint8_t> input1({2});
5806
5807 std::vector<uint8_t> output({
5808 2, 4, 6, 8, 10, 12,
5809 14, 16, 18, 20, 22, 24
5810 });
5811
5812 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005813 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01005814 shape0,
5815 input0,
5816 1.0f,
5817 0,
5818 shape1,
5819 input1,
5820 1.0f,
5821 0,
5822 shape0,
5823 output,
5824 1.0f,
5825 0);
5826}
5827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005828LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
5829 armnn::IWorkloadFactory& workloadFactory,
5830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01005831{
5832 const unsigned int shape0[] = { 1, 2, 2, 3 };
5833 const unsigned int shape1[] = { 1, 1, 1, 3 };
5834
5835 std::vector<uint8_t> input0({
5836 1, 2, 3, 4, 5, 6,
5837 7, 8, 9, 10, 11, 12
5838 });
5839
5840 std::vector<uint8_t> input1({1, 2, 3});
5841
5842 std::vector<uint8_t> output({
5843 1, 4, 9, 4, 10, 18,
5844 7, 16, 27, 10, 22, 36
5845 });
5846
5847 return MultiplicationUint8TestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005848 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01005849 shape0,
5850 input0,
5851 1.0f,
5852 0,
5853 shape1,
5854 input1,
5855 1.0f,
5856 0,
5857 shape0,
5858 output,
5859 1.0f,
5860 0);
5861}
telsoa014fcda012018-03-09 14:13:49 +00005862
David Beckf195f032018-09-06 16:46:34 +01005863namespace
5864{
5865template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005866LayerTestResult<T, 4> SubtractionTestHelper(
5867 armnn::IWorkloadFactory& workloadFactory,
5868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5869 const unsigned int shape0[4],
5870 const std::vector<T>& values0,
5871 float scale0,
5872 int32_t offset0,
5873 const unsigned int shape1[4],
5874 const std::vector<T> & values1,
5875 float scale1,
5876 int32_t offset1,
5877 const unsigned int outShape[4],
5878 const std::vector<T> & outValues,
5879 float outScale,
5880 int32_t outOffset)
David Beckf195f032018-09-06 16:46:34 +01005881{
5882 auto dataType = (std::is_same<T, uint8_t>::value ?
5883 armnn::DataType::QuantisedAsymm8 :
5884 armnn::DataType::Float32);
5885
5886 armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
5887 armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
5888 armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
5889
5890 inputTensorInfo0.SetQuantizationScale(scale0);
5891 inputTensorInfo0.SetQuantizationOffset(offset0);
5892
5893 inputTensorInfo1.SetQuantizationScale(scale1);
5894 inputTensorInfo1.SetQuantizationOffset(offset1);
5895
5896 outputTensorInfo.SetQuantizationScale(outScale);
5897 outputTensorInfo.SetQuantizationOffset(outOffset);
5898
5899 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
5900 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
5901
5902 LayerTestResult<T, 4> result(outputTensorInfo);
5903 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
5904
5905 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
5906 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
5907 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5908
5909 armnn::SubtractionQueueDescriptor data;
5910 armnn::WorkloadInfo info;
5911 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
5912 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
5913 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
5914
5915 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
5916
5917 inputHandle0->Allocate();
5918 inputHandle1->Allocate();
5919 outputHandle->Allocate();
5920
5921 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
5922 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
5923
David Beckf195f032018-09-06 16:46:34 +01005924 workload->Execute();
5925
5926 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5927
5928 return result;
5929}
5930} // anonymous namespace
5931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005932LayerTestResult<uint8_t, 4> SubtractionUint8Test(
5933 armnn::IWorkloadFactory& workloadFactory,
5934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005935{
5936 const unsigned int shape0[] = { 1, 1, 2, 2 };
5937 const unsigned int shape1[] = { 1, 1, 2, 2 };
5938
5939 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5940 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
5941 std::vector<uint8_t> output({ 3, 3, 5, 5 });
5942
5943 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005944 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005945 shape0, input0, 0.5f, 2,
5946 shape1, input1, 1.0f, 0,
5947 shape0, output, 1.0f, 0);
5948}
5949
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005950LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
5951 armnn::IWorkloadFactory& workloadFactory,
5952 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005953{
5954 const unsigned int shape0[] = { 1, 1, 2, 2 };
5955 const unsigned int shape1[] = { 1, 1, 1, 1 };
5956
5957 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5958 std::vector<uint8_t> input1({ 2 });
5959 std::vector<uint8_t> output({ 5, 6, 7, 8 });
5960
5961 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005962 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005963 shape0, input0, 0.5f, 2,
5964 shape1, input1, 1.0f, 0,
5965 shape0, output, 1.0f, 3);
5966}
5967
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005968LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
5969 armnn::IWorkloadFactory& workloadFactory,
5970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005971{
5972 const unsigned int shape0[] = { 1, 1, 2, 2 };
5973 const unsigned int shape1[] = { 1, 1, 2, 1 };
5974
5975 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
5976 std::vector<uint8_t> input1({ 2, 1 });
5977 std::vector<uint8_t> output({ 8, 11, 12, 15 });
5978
5979 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005980 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005981 shape0, input0, 1.0f, 0,
5982 shape1, input1, 1.0f, 0,
5983 shape0, output, 1.0f, 0);
5984}
5985
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005986LayerTestResult<float, 4> SubtractionTest(
5987 armnn::IWorkloadFactory& workloadFactory,
5988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01005989{
5990 const unsigned int shape0[] = { 1, 1, 2, 2 };
5991 const unsigned int shape1[] = { 1, 1, 2, 2 };
5992
5993 std::vector<float> input0({ 1, 2, 3, 4 });
5994 std::vector<float> input1({ 1, -1, 0, 2 });
5995 std::vector<float> output({ 0, 3, 3, 2 });
5996
5997 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005998 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01005999 shape0, input0, 1.0f, 0,
6000 shape1, input1, 1.0f, 0,
6001 shape0, output, 1.0f, 0);
6002}
6003
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006004LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
6005 armnn::IWorkloadFactory& workloadFactory,
6006 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006007{
6008 const unsigned int shape0[] = { 1, 1, 2, 2 };
6009 const unsigned int shape1[] = { 1, 1, 1, 1 };
6010
6011 std::vector<float> input0({ 1, 2, 3, 4 });
6012 std::vector<float> input1({ 10 });
6013 std::vector<float> output({ -9, -8, -7, -6 });
6014
6015 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006016 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006017 shape0, input0, 1.0f, 0,
6018 shape1, input1, 1.0f, 0,
6019 shape0, output, 1.0f, 0);
6020}
6021
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006022LayerTestResult<float, 4> SubtractionBroadcastTest(
6023 armnn::IWorkloadFactory& workloadFactory,
6024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01006025{
6026 const unsigned int shape0[] = { 1, 1, 2, 2 };
6027 const unsigned int shape1[] = { 1, 1, 1, 2 };
6028
6029 std::vector<float> input0({ 1, 2, 3, 4 });
6030 std::vector<float> input1({ 10, -5 });
6031 std::vector<float> output({ -9, 7, -7, 9 });
6032
6033 return SubtractionTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006034 memoryManager,
David Beckf195f032018-09-06 16:46:34 +01006035 shape0, input0, 1.0f, 0,
6036 shape1, input1, 1.0f, 0,
6037 shape0, output, 1.0f, 0);
6038}
6039
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006040LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
6041 armnn::IWorkloadFactory& workloadFactory,
6042 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006043{
6044 constexpr unsigned int inputWidth = 4;
6045 constexpr unsigned int inputHeight = 4;
6046 constexpr unsigned int inputChannels = 1;
6047 constexpr unsigned int inputBatchSize = 1;
6048
6049 constexpr unsigned int outputWidth = inputWidth;
6050 constexpr unsigned int outputHeight = inputHeight;
6051 constexpr unsigned int outputChannels = inputChannels;
6052 constexpr unsigned int outputBatchSize = inputBatchSize;
6053
6054 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6055 armnn::DataType::QuantisedAsymm8);
6056 inputTensorInfo.SetQuantizationScale(1.5f);
6057 inputTensorInfo.SetQuantizationOffset(-3);
6058
6059 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6060 armnn::DataType::QuantisedAsymm8);
6061 outputTensorInfo.SetQuantizationScale(1.5f);
6062 outputTensorInfo.SetQuantizationOffset(-3);
6063
6064 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6065 1, 2, 3, 4,
6066 2, 3, 4, 5,
6067 3, 4, 5, 6,
6068 4, 5, 6, 7
6069 }));
6070
6071 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6072 result.outputExpected = input;
6073
6074 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6075 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6076
6077 armnn::ResizeBilinearQueueDescriptor descriptor;
6078 armnn::WorkloadInfo info;
6079 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6080 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6081
6082 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6083
6084 inputHandle->Allocate();
6085 outputHandle->Allocate();
6086 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6087
6088 workload->Execute();
6089
6090 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6091 return result;
6092}
6093
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006094LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
6095 armnn::IWorkloadFactory& workloadFactory,
6096 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006097{
6098 constexpr unsigned int inputWidth = 2;
6099 constexpr unsigned int inputHeight = 2;
6100 constexpr unsigned int inputChannels = 1;
6101 constexpr unsigned int inputBatchSize = 1;
6102
6103 constexpr unsigned int outputWidth = inputWidth / 2;
6104 constexpr unsigned int outputHeight = inputHeight / 2;
6105 constexpr unsigned int outputChannels = inputChannels;
6106 constexpr unsigned int outputBatchSize = inputBatchSize;
6107
6108 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6109 armnn::DataType::QuantisedAsymm8);
6110 inputTensorInfo.SetQuantizationScale(0.1567f);
6111 inputTensorInfo.SetQuantizationOffset(1);
6112
6113 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6114 armnn::DataType::QuantisedAsymm8);
6115 outputTensorInfo.SetQuantizationScale(0.1567f);
6116 outputTensorInfo.SetQuantizationOffset(1);
6117
6118 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6119 1, 255,
6120 200, 250
6121 }));
6122
6123 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
6124 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
telsoa01c577f2c2018-08-31 09:22:23 +01006125 // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
telsoa014fcda012018-03-09 14:13:49 +00006126 // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
6127 // the centre).
6128 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6129 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6130 1
6131 }));
6132
6133 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6134 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6135
6136 armnn::ResizeBilinearQueueDescriptor descriptor;
6137 armnn::WorkloadInfo info;
6138 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6139 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6140
6141 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6142
6143 inputHandle->Allocate();
6144 outputHandle->Allocate();
6145 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6146
6147 workload->Execute();
6148
6149 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6150 return result;
6151}
6152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006153LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
6154 armnn::IWorkloadFactory& workloadFactory,
6155 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006156{
6157 constexpr unsigned int inputWidth = 4;
6158 constexpr unsigned int inputHeight = 4;
6159 constexpr unsigned int inputChannels = 1;
6160 constexpr unsigned int inputBatchSize = 1;
6161
6162 constexpr unsigned int outputWidth = inputWidth / 2;
6163 constexpr unsigned int outputHeight = inputHeight / 2;
6164 constexpr unsigned int outputChannels = inputChannels;
6165 constexpr unsigned int outputBatchSize = inputBatchSize;
6166
6167 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6168 armnn::DataType::QuantisedAsymm8);
6169 inputTensorInfo.SetQuantizationScale(3.141592f);
6170 inputTensorInfo.SetQuantizationOffset(3);
6171
6172 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6173 armnn::DataType::QuantisedAsymm8);
6174 outputTensorInfo.SetQuantizationScale(3.141592f);
6175 outputTensorInfo.SetQuantizationOffset(3);
6176
6177 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6178 1, 2, 3, 4,
6179 2, 3, 4, 5,
6180 3, 4, 5, 6,
6181 4, 5, 6, 7
6182 }));
6183
6184 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6185 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6186 1, 3,
6187 3, 5
6188 }));
6189
6190 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6191 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6192
6193 armnn::ResizeBilinearQueueDescriptor descriptor;
6194 armnn::WorkloadInfo info;
6195 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6196 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6197
6198 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6199
6200 inputHandle->Allocate();
6201 outputHandle->Allocate();
6202 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6203
6204 workload->Execute();
6205
6206 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6207 return result;
6208}
6209
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006210LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
6211 armnn::IWorkloadFactory& workloadFactory,
6212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006213{
6214 constexpr unsigned int inputWidth = 3;
6215 constexpr unsigned int inputHeight = 2;
6216 constexpr unsigned int inputChannels = 1;
6217 constexpr unsigned int inputBatchSize = 1;
6218
6219 constexpr unsigned int outputWidth = 2;
6220 constexpr unsigned int outputHeight = 1;
6221 constexpr unsigned int outputChannels = inputChannels;
6222 constexpr unsigned int outputBatchSize = inputBatchSize;
6223
6224 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6225 armnn::DataType::QuantisedAsymm8);
6226 inputTensorInfo.SetQuantizationScale(1.5f);
6227 inputTensorInfo.SetQuantizationOffset(-1);
6228
6229 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6230 armnn::DataType::QuantisedAsymm8);
6231 outputTensorInfo.SetQuantizationScale(1.5f);
6232 outputTensorInfo.SetQuantizationOffset(-1);
6233
6234 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6235 1, 2, 3, // 3.0, 4.5, 6.0
6236 5, 8, 13 // 9.0, 13.5, 21.0
6237 }));
6238
6239 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6240 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6241 1, 3 // 3.0, 5.25
6242 }));
6243
6244 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6245 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6246
6247 armnn::ResizeBilinearQueueDescriptor descriptor;
6248 armnn::WorkloadInfo info;
6249 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6250 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6251
6252 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6253
6254 inputHandle->Allocate();
6255 outputHandle->Allocate();
6256
6257 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6258
6259 workload->Execute();
6260
6261 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6262 return result;
6263}
6264
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006265LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
6266 armnn::IWorkloadFactory& workloadFactory,
6267 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006268{
6269 constexpr unsigned int inputWidth = 2;
6270 constexpr unsigned int inputHeight = 3;
6271 constexpr unsigned int inputChannels = 1;
6272 constexpr unsigned int inputBatchSize = 1;
6273
6274 constexpr unsigned int outputWidth = 5;
6275 constexpr unsigned int outputHeight = 3;
6276 constexpr unsigned int outputChannels = inputChannels;
6277 constexpr unsigned int outputBatchSize = inputBatchSize;
6278
6279 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
6280 armnn::DataType::QuantisedAsymm8);
6281 inputTensorInfo.SetQuantizationScale(0.010765f);
6282 inputTensorInfo.SetQuantizationOffset(7);
6283
6284 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
6285 armnn::DataType::QuantisedAsymm8);
6286 outputTensorInfo.SetQuantizationScale(0.010132f);
6287 outputTensorInfo.SetQuantizationOffset(-18);
6288
6289 auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
6290 24, 228, // 0.183005, 2.379065,
6291 105, 128, // 1.05497, 1.302565
6292 230, 71 // 2.400595, 0.68896
6293 }));
6294
6295 LayerTestResult<uint8_t, 4> result(outputTensorInfo);
6296 result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
6297 0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
6298 86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
6299 219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
6300 }));
6301
6302 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6303 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6304
6305 armnn::ResizeBilinearQueueDescriptor descriptor;
6306 armnn::WorkloadInfo info;
6307 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6308 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6309
6310 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
6311
6312 inputHandle->Allocate();
6313 outputHandle->Allocate();
6314 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
6315
6316 workload->Execute();
6317
6318 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6319 return result;
6320}
6321
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006322LayerTestResult<float, 4> BatchNormTest(
6323 armnn::IWorkloadFactory& workloadFactory,
6324 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006325{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006326 // BatchSize: 1
6327 // Channels: 2
6328 // Height: 3
6329 // Width: 2
6330
6331 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
6332 std::vector<float> inputValues
6333 {
6334 // Batch 0, Channel 0, Height (3) x Width (2)
6335 1.f, 4.f,
6336 4.f, 2.f,
6337 1.f, 6.f,
6338
6339 // Batch 0, Channel 1, Height (3) x Width (2)
6340 1.f, 1.f,
6341 4.f, 1.f,
6342 -2.f, 4.f
6343 };
6344 std::vector<float> expectedOutputValues
6345 {
6346 // Batch 0, Channel 0, Height (3) x Width (2)
6347 1.f, 4.f,
6348 4.f, 2.f,
6349 1.f, 6.f,
6350
6351 // Batch 0, Channel 1, Height (3) x Width (2)
6352 3.f, 3.f,
6353 4.f, 3.f,
6354 2.f, 4.f
6355 };
6356
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006357 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
6358 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006359 0.f, 0, armnn::DataLayout::NCHW);
6360}
6361
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006362LayerTestResult<float, 4> BatchNormNhwcTest(
6363 armnn::IWorkloadFactory& workloadFactory,
6364 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006365{
6366 // BatchSize: 1
6367 // Height: 3
6368 // Width: 2
6369 // Channels: 2
6370
6371 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
6372 std::vector<float> inputValues
6373 {
6374 // Batch 0, Height 0, Width (2) x Channel (2)
6375 1.f, 1.f,
6376 4.f, 1.f,
6377
6378 // Batch 0, Height 1, Width (2) x Channel (2)
6379 4.f, 4.f,
6380 2.f, 1.f,
6381
6382 // Batch 0, Height 2, Width (2) x Channel (2)
6383 1.f, -2.f,
6384 6.f, 4.f
6385 };
6386 std::vector<float> expectedOutputValues
6387 {
6388 // Batch 0, Height 0, Width (2) x Channel (2)
6389 1.f, 3.f,
6390 4.f, 3.f,
6391
6392 // Batch 0, Height 1, Width (2) x Channel (2)
6393 4.f, 4.f,
6394 2.f, 3.f,
6395
6396 // Batch 0, Height 2, Width (2) x Channel (2)
6397 1.f, 2.f,
6398 6.f, 4.f
6399 };
6400
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006401 return BatchNormTestImpl<float>(workloadFactory, memoryManager,
6402 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006403 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00006404}
6405
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006406LayerTestResult<uint8_t, 4> BatchNormUint8Test(
6407 armnn::IWorkloadFactory& workloadFactory,
6408 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006409{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006410 // BatchSize: 1
6411 // Channels: 2
6412 // Height: 3
6413 // Width: 2
6414
6415 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
6416 std::vector<float> inputValues
6417 {
6418 // Batch 0, Channel 0, Height (3) x Width (2)
6419 1.f, 4.f,
6420 4.f, 2.f,
6421 1.f, 6.f,
6422
6423 // Batch 0, Channel 1, Height (3) x Width (2)
6424 1.f, 1.f,
6425 4.f, 1.f,
6426 -2.f, 4.f
6427 };
6428 std::vector<float> expectedOutputValues
6429 {
6430 // Batch 0, Channel 0, Height (3) x Width (2)
6431 1.f, 4.f,
6432 4.f, 2.f,
6433 1.f, 6.f,
6434
6435 // Batch 0, Channel 1, Height (3) x Width (2)
6436 3.f, 3.f,
6437 4.f, 3.f,
6438 2.f, 4.f
6439 };
6440
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006441 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
6442 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006443 1.f/20.f, 50, armnn::DataLayout::NCHW);
6444}
6445
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006446LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6447 armnn::IWorkloadFactory& workloadFactory,
6448 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006449{
6450 // BatchSize: 1
6451 // Height: 3
6452 // Width: 2
6453 // Channels: 2
6454
6455 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
6456 std::vector<float> inputValues
6457 {
6458 // Batch 0, Height 0, Width (2) x Channel (2)
6459 1.f, 1.f,
6460 4.f, 1.f,
6461
6462 // Batch 0, Height 1, Width (2) x Channel (2)
6463 4.f, 4.f,
6464 2.f, 1.f,
6465
6466 // Batch 0, Height 2, Width (2) x Channel (2)
6467 1.f, -2.f,
6468 6.f, 4.f
6469 };
6470 std::vector<float> expectedOutputValues
6471 {
6472 // Batch 0, Height 0, Width (2) x Channel (2)
6473 1.f, 3.f,
6474 4.f, 3.f,
6475
6476 // Batch 0, Height 1, Width (2) x Channel (2)
6477 4.f, 4.f,
6478 2.f, 3.f,
6479
6480 // Batch 0, Height 2, Width (2) x Channel (2)
6481 1.f, 2.f,
6482 6.f, 4.f
6483 };
6484
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006485 return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
6486 inputOutputShape, inputValues, expectedOutputValues,
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01006487 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00006488}
6489
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006490LayerTestResult<uint8_t, 4> ConstantUint8Test(
6491 armnn::IWorkloadFactory& workloadFactory,
6492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006493{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006494 return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00006495}
6496
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006497LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
6498 armnn::IWorkloadFactory& workloadFactory,
6499 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006500{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006501 return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006502}
6503
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006504LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
6505 armnn::IWorkloadFactory& workloadFactory,
6506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006507{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006508 return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006509}
6510
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006511LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
6512 armnn::IWorkloadFactory& workloadFactory,
6513 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006514{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006515 return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006516}
6517
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006518LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
6519 armnn::IWorkloadFactory& workloadFactory,
6520 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006521{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006522 return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006523}
6524
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006525LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
6526 armnn::IWorkloadFactory& workloadFactory,
6527 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006528{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006529 return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006530}
6531
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006532LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
6533 armnn::IWorkloadFactory& workloadFactory,
6534 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006535{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006536 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006537}
6538
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006539LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
6540 armnn::IWorkloadFactory& workloadFactory,
6541 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006542{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006543 return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006544}
6545
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006546LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
6547 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6549 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00006550{
narpra015cdda352018-11-19 15:30:27 +00006551 return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006552}
6553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006554LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
6555 armnn::IWorkloadFactory& workloadFactory,
6556 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006557{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006558 return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006559}
6560
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006561LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
6562 armnn::IWorkloadFactory& workloadFactory,
6563 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006564{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006565 return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00006566}
6567
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006568LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
6569 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00006570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6571 bool useSubtensor)
6572{
6573 return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
6574}
6575
6576LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
6577 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006578 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006579{
narpra015cdda352018-11-19 15:30:27 +00006580 return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6581}
6582
6583LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
6584 armnn::IWorkloadFactory& workloadFactory,
6585 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6586{
6587 return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6588}
6589
6590LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
6591 armnn::IWorkloadFactory& workloadFactory,
6592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6593{
6594 return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6595}
6596
6597LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
6598 armnn::IWorkloadFactory& workloadFactory,
6599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
6600{
6601 return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
6602}
6603
6604LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
6605 armnn::IWorkloadFactory& workloadFactory,
6606 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6607{
6608 return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6609}
6610
6611LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
6612 armnn::IWorkloadFactory& workloadFactory,
6613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6614{
6615 return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6616}
6617
6618LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
6619 armnn::IWorkloadFactory& workloadFactory,
6620 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6621{
6622 return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
6623}
6624
6625LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
6626 armnn::IWorkloadFactory& workloadFactory,
6627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6628 bool useSubtensor)
6629{
6630 return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00006631}
6632
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006633LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
6634 armnn::IWorkloadFactory& workloadFactory,
6635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6636 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006637{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006638 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00006639}
6640
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006641LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
6642 armnn::IWorkloadFactory& workloadFactory,
6643 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6644 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006645{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006646 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
6647 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00006648}
6649
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006650LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
6651 armnn::IWorkloadFactory& workloadFactory,
6652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6653 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006654{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006655 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
telsoa014fcda012018-03-09 14:13:49 +00006656}
6657
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006658LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
6659 armnn::IWorkloadFactory& workloadFactory,
6660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6661 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00006662{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006663 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
6664 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00006665}
6666
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006667LayerTestResult<float, 4> SimpleMaxPooling2dTest(
6668 armnn::IWorkloadFactory& workloadFactory,
6669 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006670 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00006671{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006672 return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00006673}
6674
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006675LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
6676 armnn::IWorkloadFactory& workloadFactory,
6677 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006678 const armnn::DataLayout dataLayout)
Francis Murtagh043d0d02018-10-05 14:08:48 +01006679{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006680 return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +01006681}
6682
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006683LayerTestResult<float, 4> SimpleAveragePooling2dTest(
6684 armnn::IWorkloadFactory& workloadFactory,
6685 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006686 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00006687{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006688 return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
James Conroy69482272018-10-19 10:41:35 +01006689}
6690
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006691LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
6692 armnn::IWorkloadFactory& workloadFactory,
6693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006694 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01006695{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006696 return SimpleAveragePooling2dTestCommon<uint8_t>(
6697 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00006698}
6699
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006700LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
6701 armnn::IWorkloadFactory& workloadFactory,
6702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6703 bool forceNoPadding)
surmeh01bceff2f2018-03-29 16:29:27 +01006704{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006705 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
6706 workloadFactory, memoryManager, forceNoPadding);
surmeh01bceff2f2018-03-29 16:29:27 +01006707}
6708
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006709LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
6710 armnn::IWorkloadFactory& workloadFactory,
6711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006712{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006713 return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006714}
6715
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006716LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
6717 armnn::IWorkloadFactory& workloadFactory,
6718 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006719{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006720 return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00006721}
6722
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006723LayerTestResult<float, 4> SimpleL2Pooling2dTest(
6724 armnn::IWorkloadFactory& workloadFactory,
6725 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006726 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00006727{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006728 return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00006729}
6730
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006731LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
6732 armnn::IWorkloadFactory& workloadFactory,
6733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006734 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00006735{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006736 return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +00006737}
6738
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006739LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
6740 armnn::IWorkloadFactory& workloadFactory,
6741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006742{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006743 return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006744}
6745
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006746LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
6747 armnn::IWorkloadFactory& workloadFactory,
6748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006749{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006750 return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006751}
6752
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006753LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
6754 armnn::IWorkloadFactory& workloadFactory,
6755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006756{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006757 return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006758}
6759
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006760LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
6761 armnn::IWorkloadFactory& workloadFactory,
6762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006763{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006764 return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006765}
6766
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006767LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
6768 armnn::IWorkloadFactory& workloadFactory,
6769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006770{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006771 return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006772}
6773
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006774LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
6775 armnn::IWorkloadFactory& workloadFactory,
6776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006777{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006778 return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006779}
6780
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006781LayerTestResult<float, 4> L2Pooling2dSize7Test(
6782 armnn::IWorkloadFactory& workloadFactory,
6783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006784{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006785 return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006786}
6787
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006788LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
6789 armnn::IWorkloadFactory& workloadFactory,
6790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006791{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006792 return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006793}
6794
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006795LayerTestResult<float, 4> L2Pooling2dSize9Test(
6796 armnn::IWorkloadFactory& workloadFactory,
6797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006798{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006799 return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006800}
6801
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006802LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
6803 armnn::IWorkloadFactory& workloadFactory,
6804 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006805{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006806 return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006807}
6808
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006809LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
6810 armnn::IWorkloadFactory& workloadFactory,
6811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006812{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006813 return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006814}
6815
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006816LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
6817 armnn::IWorkloadFactory& workloadFactory,
6818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006819{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006820 return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006821}
6822
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006823LayerTestResult<float, 4> ComparePooling2dTest(
6824 armnn::IWorkloadFactory& workloadFactory,
6825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6826 armnn::IWorkloadFactory& refWorkloadFactory,
6827 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00006828{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006829 return ComparePooling2dTestCommon<float>(
6830 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
telsoa014fcda012018-03-09 14:13:49 +00006831}
6832
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006833LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
6834 armnn::IWorkloadFactory& workloadFactory,
6835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6836 armnn::IWorkloadFactory& refWorkloadFactory,
6837 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00006838{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006839 return ComparePooling2dTestCommon<uint8_t>(
6840 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00006841}
6842
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006843LayerTestResult<float, 2> FullyConnectedLargeTest(
6844 armnn::IWorkloadFactory& workloadFactory,
6845 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
6846 bool transposeWeights)
telsoa014fcda012018-03-09 14:13:49 +00006847{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006848 return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
telsoa014fcda012018-03-09 14:13:49 +00006849}
6850
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006851LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
6852 armnn::IWorkloadFactory& workloadFactory,
6853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006854{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006855 return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006856}
6857
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006858LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
6859 armnn::IWorkloadFactory& workloadFactory,
6860 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006861{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006862 return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00006863}
6864
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006865LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
6866 armnn::IWorkloadFactory& workloadFactory,
6867 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006868{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006869 return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006870}
6871
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006872LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
6873 armnn::IWorkloadFactory& workloadFactory,
6874 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006875{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006876 return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00006877}
6878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006879LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
6880 armnn::IWorkloadFactory& workloadFactory,
6881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006882{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006883 return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006884}
6885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006886LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
6887 armnn::IWorkloadFactory& workloadFactory,
6888 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006889{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006890 return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006891}
6892
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006893LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
6894 armnn::IWorkloadFactory& workloadFactory,
6895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006896{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006897 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006898}
6899
// Runs the average pooling test with no padding for uint8 data; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
}
6906
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006907LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
6908 armnn::IWorkloadFactory& workloadFactory,
6909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006910{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006911 return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006912}
6913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006914LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
6915 armnn::IWorkloadFactory& workloadFactory,
6916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006917{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006918 return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006919}
6920
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006921LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
6922 armnn::IWorkloadFactory& workloadFactory,
6923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006924{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006925 return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006926}
6927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006928LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
6929 armnn::IWorkloadFactory& workloadFactory,
6930 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006931{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006932 return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006933}
6934
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006935LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
6936 armnn::IWorkloadFactory& workloadFactory,
6937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006938{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006939 return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006940}
6941
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006942LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
6943 armnn::IWorkloadFactory& workloadFactory,
6944 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006945{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006946 return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006947}
6948
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006949LayerTestResult<float, 4> SimplePermuteFloat32Test(
6950 armnn::IWorkloadFactory& workloadFactory,
6951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006952{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006953 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006954};
6955
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006956LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
6957 armnn::IWorkloadFactory& workloadFactory,
6958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006959{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006960 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00006961};
surmeh01bceff2f2018-03-29 16:29:27 +01006962
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006963LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
6964 armnn::IWorkloadFactory& workloadFactory,
6965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006966{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006967 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006968};
6969
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006970LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
6971 armnn::IWorkloadFactory& workloadFactory,
6972 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006973{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006974 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01006975};
6976
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006977LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
6978 armnn::IWorkloadFactory& workloadFactory,
6979 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01006980{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006981 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01006982};
6983
namespace
{

// Generic driver for the Mean layer tests below.
//
// Builds input/output tensor infos from the given shapes, creates a Mean
// workload configured with 'axis' and 'keepDims' on the supplied workload
// factory, executes it once, and returns the actual output alongside the
// expected one.
//
// T == uint8_t selects QuantisedAsymm8; any other T is treated as Float32.
// 'scale' and 'offset' are applied as quantization parameters to BOTH the
// input and the output tensor infos.
//
// NOTE(review): memoryManager is accepted for signature consistency with the
// other test entry points but is not used in this helper.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Only two data types are exercised by the Mean tests: quantized uint8 and float32.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization parameters on input and output.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the Mean operation and wire the tensor handles into the workload.
    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Handles must be allocated before any host<->device copies.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
7041
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007042LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
7043 armnn::IWorkloadFactory& workloadFactory,
7044 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007045{
7046 const unsigned int inputShape[] = { 3, 2 };
7047 const unsigned int outputShape[] = { 1 };
7048
7049 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7050 std::vector<uint8_t> output({ 2 });
7051
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007052 return MeanTestHelper<uint8_t, 2, 1>(
7053 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007054}
7055
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007056LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
7057 armnn::IWorkloadFactory& workloadFactory,
7058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007059{
7060 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7061 const unsigned int outputShape[] = { 1, 1, 2 };
7062
7063 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7064 std::vector<uint8_t> output({ 2, 2 });
7065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007066 return MeanTestHelper<uint8_t, 4, 3>(
7067 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007068}
7069
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007070LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
7071 armnn::IWorkloadFactory& workloadFactory,
7072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007073{
7074 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7075 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7076
7077 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
7078 std::vector<uint8_t> output({ 2, 2 });
7079
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007080 return MeanTestHelper<uint8_t, 4, 4>(
7081 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007082}
7083
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007084LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
7085 armnn::IWorkloadFactory& workloadFactory,
7086 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007087{
7088 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7089 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7090
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007091 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01007092 std::vector<uint8_t> output({ 1, 3, 5 });
7093
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007094 return MeanTestHelper<uint8_t, 4, 4>(
7095 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007096}
7097
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007098LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
7099 armnn::IWorkloadFactory& workloadFactory,
7100 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007101{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007102 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007103 const unsigned int outputShape[] = { 2 };
7104
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007105 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
7106 24 });
7107 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01007108
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007109 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
7110 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007111 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01007112}
7113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007114LayerTestResult<float, 1> MeanFloatSimpleTest(
7115 armnn::IWorkloadFactory& workloadFactory,
7116 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007117{
7118 const unsigned int inputShape[] = { 3, 2 };
7119 const unsigned int outputShape[] = { 1 };
7120
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007121 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7122 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007123
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007124 return MeanTestHelper<float, 2, 1>(
7125 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007126}
7127
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007128LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
7129 armnn::IWorkloadFactory& workloadFactory,
7130 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007131{
7132 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7133 const unsigned int outputShape[] = { 3, 1, 2 };
7134
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007135 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
7136 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007137
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007138 return MeanTestHelper<float, 4, 3>(
7139 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007140}
7141
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007142LayerTestResult<float, 4> MeanFloatKeepDimsTest(
7143 armnn::IWorkloadFactory& workloadFactory,
7144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007145{
7146 const unsigned int inputShape[] = { 1, 1, 3, 2 };
7147 const unsigned int outputShape[] = { 1, 1, 1, 2 };
7148
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007149 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
7150 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007151
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007152 return MeanTestHelper<float, 4, 4>(
7153 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007154}
7155
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007156LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
7157 armnn::IWorkloadFactory& workloadFactory,
7158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007159{
7160 const unsigned int inputShape[] = { 2, 3, 1, 2 };
7161 const unsigned int outputShape[] = { 1, 3, 1, 1 };
7162
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007163 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
7164 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01007165
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007166 return MeanTestHelper<float, 4, 4>(
7167 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007168}
7169
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007170LayerTestResult<float, 1> MeanVtsFloat1Test(
7171 armnn::IWorkloadFactory& workloadFactory,
7172 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007173{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007174 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01007175 const unsigned int outputShape[] = { 2 };
7176
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007177 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
7178 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
7179 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01007180
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007181 return MeanTestHelper<float, 3, 1>(
7182 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007183}
7184
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007185LayerTestResult<float, 3> MeanVtsFloat2Test(
7186 armnn::IWorkloadFactory& workloadFactory,
7187 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01007188{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007189 const unsigned int inputShape[] = { 4, 3, 2 };
7190 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01007191
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007192 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
7193 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
7194 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01007195
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007196 return MeanTestHelper<float, 3, 3>(
7197 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007198}
7199
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007200LayerTestResult<float, 3> MeanVtsFloat3Test(
7201 armnn::IWorkloadFactory& workloadFactory,
7202 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01007203{
7204 const unsigned int inputShape[] = { 1, 2, 2, 1 };
7205 const unsigned int outputShape[] = { 1, 2, 1 };
7206
7207 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
7208 std::vector<float> output({ 1.5f, 3.5f });
7209
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007210 return MeanTestHelper<float, 4, 3>(
7211 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01007212}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007213
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007214LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
7215 armnn::IWorkloadFactory& workloadFactory,
7216 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007217{
7218 // Create Initial Tensor
7219 // 1, 2, 3
7220 // 4, 5, 6
7221 // 7, 8, 9
7222
7223 armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
7224 armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
7225
7226 boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
7227 {1, 2, 3,
7228 4, 5, 6,
7229 7, 8, 9
7230 });
7231
7232 std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
7233 workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
7234 std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
7235 workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
7236
7237 // Apply MaxPool poolSize = 1x1, stride=2x2
7238 // Result =
7239 // 1, 3
7240 // 7, 9
7241 armnn::Pooling2dDescriptor descriptor;
7242 descriptor.m_PoolHeight = 1;
7243 descriptor.m_PoolWidth = 1;
7244 descriptor.m_StrideX = 2;
7245 descriptor.m_StrideY = 2;
7246 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
7247
7248 armnn::Pooling2dQueueDescriptor queueDescriptor;
7249 queueDescriptor.m_Parameters = descriptor;
7250 armnn::WorkloadInfo workloadInfo;
7251 AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
7252 AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
7253
7254 // Create the MaxPool
7255 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
7256
7257 //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
7258 auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
7259 boost::multi_array<float, 4> resultMaxPool;
7260 resultMaxPool.resize(shape);
7261
7262
7263 // Create addition with another tensor the same size
7264 // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
7265 // with the initial tensor.
7266 // 12, 16
7267 // 24, 28
7268
7269 armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
7270 armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
7271
7272 boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
7273 {12, 16,
7274 24, 28,
7275 });
7276
7277 // Expected output tensor after MaxPool and Addition.
7278 LayerTestResult<float,4> addRet(addOutputTensorInfo);
7279 addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
7280 {
7281 13, 19,
7282 31, 37
7283 }));
7284
7285 std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
7286 std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
7287
7288 armnn::AdditionQueueDescriptor data;
7289 armnn::WorkloadInfo info;
7290
7291 // Add the output of the MaxPool and the new tensor
7292 AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
7293 AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
7294 AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
7295
7296 std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
7297
7298 poolingInputHandle->Allocate();
7299 poolingOutputHandle->Allocate();
7300 addInputHandle->Allocate();
7301 addOutputHandle->Allocate();
7302
7303 CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
7304 CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
7305
7306 CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
7307 CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
7308
7309 workload->Execute();
7310 addWorkload->Execute();
7311
7312 CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
7313
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01007314 return addRet;
7315}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007316
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007317LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
7318 armnn::IWorkloadFactory& workloadFactory,
7319 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007320{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007321 return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007322}
7323
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007324LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
7325 armnn::IWorkloadFactory& workloadFactory,
7326 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007327{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007328 return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007329}
7330
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007331LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
7332 armnn::IWorkloadFactory& workloadFactory,
7333 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007334{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007335 return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007336}
7337
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007338LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
7339 armnn::IWorkloadFactory& workloadFactory,
7340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007341{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007342 return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007343}
7344
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007345LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
7346 armnn::IWorkloadFactory& workloadFactory,
7347 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007348{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007349 return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007350}
7351
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007352LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
7353 armnn::IWorkloadFactory& workloadFactory,
7354 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007355{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007356 return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007357}
7358
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007359LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
7360 armnn::IWorkloadFactory& workloadFactory,
7361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007362{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007363 return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007364}
7365
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007366LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
7367 armnn::IWorkloadFactory& workloadFactory,
7368 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007369{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007370 return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007371}
7372
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007373LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
7374 armnn::IWorkloadFactory& workloadFactory,
7375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007376{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007377 return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007378}
7379
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007380LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
7381 armnn::IWorkloadFactory& workloadFactory,
7382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007383{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007384 return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007385}
7386
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007387LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
7388 armnn::IWorkloadFactory& workloadFactory,
7389 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007390{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007391 return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007392}
7393
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007394LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
7395 armnn::IWorkloadFactory& workloadFactory,
7396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007397{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007398 return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007399}
7400
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007401LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
7402 armnn::IWorkloadFactory& workloadFactory,
7403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007404{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007405 return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007406}
7407
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007408LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
7409 armnn::IWorkloadFactory& workloadFactory,
7410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007411{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007412 return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007413}
7414
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007415LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
7416 armnn::IWorkloadFactory& workloadFactory,
7417 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007418{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007419 return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007420}
7421
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007422LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
7423 armnn::IWorkloadFactory& workloadFactory,
7424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007425{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007426 return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00007427}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007428
namespace {

// Generic driver for the BatchToSpaceNd tests below.
//
// Builds input/output tensor infos from the given shapes (data type is
// QuantisedAsymm8 when T is uint8_t, Float32 otherwise), creates a
// BatchToSpaceNd workload through the supplied factory, runs it on
// 'inputData' and returns the actual output together with the expected
// 'outputData' for comparison by the caller.
//
// Template parameters:
//   T          - element type (float or uint8_t).
//   InputDim   - rank of the input tensor.
//   OutputDim  - rank of the output tensor.
//
// 'scale'/'offset' are applied as quantization parameters to both tensors
// (defaults 1.0f/0 make them a no-op for float data).
//
// NOTE(review): 'memoryManager' is accepted to keep the signature uniform
// with the other test helpers but is not referenced in this body — confirm
// this is intentional.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
    {
    // Pick the armnn data type from the C++ element type.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    // Record the reference result the caller will compare against.
    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the BatchToSpaceNd operation and wire up its tensors.
    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
7487
// BatchToSpaceNd, NHWC, float: a 2x2 block rearranges four 2x2x1 batches
// into a single 4x4x1 image (no cropping).
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1 };

    std::vector<float> input
    ({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    // Interleaved result: the four batches tile the 4x4 output in row order.
    std::vector<float> expectedOutput
    ({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
7535
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007536LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
7537 armnn::IWorkloadFactory& workloadFactory,
7538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007539{
7540 const unsigned int inputShape[] = {4, 1, 1, 1};
7541 const unsigned int outputShape[] = {1, 2, 2, 1};
7542
7543 std::vector<float> input
7544 ({
7545 // Batch 0, Height 0, Width (2) x Channel (1)
7546 1.0f, 2.0f, 3.0f, 4.0f
7547 });
7548
7549 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
7550
7551 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007552 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007553
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007554 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7555 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7556 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007557}
7558
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007559LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
7560 armnn::IWorkloadFactory& workloadFactory,
7561 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007562{
7563 const unsigned int inputShape[] = {4, 1, 1, 3};
7564 const unsigned int outputShape[] = {1, 2, 2, 3};
7565
7566 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7567
7568 std::vector<float> expectedOutput({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });
7569
7570 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00007571 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007572
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007573 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
7574 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7575 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00007576}
7577
// BatchToSpaceNd, NCHW, float: four 3-channel single-pixel batches are
// rearranged by a 2x2 block into one 3-channel 2x2 image (no cropping).
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batch-major input: batch b contributes {3b+1, 3b+2, 3b+3}.
    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });

    std::vector<float> expectedOutput
    ({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 4.0f,
        7.0f, 10.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2.0f, 5.0f,
        8.0f, 11.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3.0f, 6.0f,
        9.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007609
// BatchToSpaceNd, NCHW, float: four single-element batches collapse into a
// single 2x2 spatial plane; element order is preserved.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<float> input
    ({
        // Four batches of one element each.
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
7632
// BatchToSpaceNd, NCHW, float: same shapes as Test1 but with a permuted
// input ordering, checking the per-channel interleave in NCHW layout.
LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<float> input({ 1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f });

    std::vector<float> expectedOutput
    ({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1.0f, 7.0f,
        2.0f, 8.0f,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3.0f, 9.0f,
        4.0f, 10.0f,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5.0f, 11.0f,
        6.0f, 12.0f,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00007664
// BatchToSpaceNd, NHWC, quantized uint8: 2x2 block over four 2x2x1 batches.
// Uses the helper's default quantization parameters (scale 1.0, offset 0).
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 });
    std::vector<uint8_t> expectedOutput({ 1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
                                               input, blockShape, crops, outputShape, expectedOutput);
}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00007681
//
// StridedSlice: thin dispatch wrappers. Each function instantiates the
// corresponding templated StridedSlice*Test<T> implementation for a
// concrete element type (float / uint8_t).
//
LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
}
// BatchToSpaceNd, NHWC, quantized uint8: four single-element batches
// collapse into one 2x2 spatial block; element order is preserved.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<uint8_t> input
    ({
        // Four batches of one element each.
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
7829
7830LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
7831 armnn::IWorkloadFactory& workloadFactory,
7832 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7833{
7834 const unsigned int inputShape[] = {4, 1, 1, 3};
7835 const unsigned int outputShape[] = {1, 2, 2, 3};
7836
7837 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
7838
7839 std::vector<uint8_t> expectedOutput({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });
7840
7841 std::vector<unsigned int> blockShape({2, 2});
7842 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
7843
7844 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
7845 armnn::DataLayout::NHWC, inputShape, input, blockShape,
7846 crops, outputShape, expectedOutput);
7847}
7848
7849
// BatchToSpaceNd, NCHW, quantized uint8: four 3-channel single-pixel
// batches rearranged by a 2x2 block into one 3-channel 2x2 image.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    // Batch-major input: batch b contributes {3b+1, 3b+2, 3b+3}.
    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 });

    std::vector<uint8_t> expectedOutput
    ({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 4,
        7, 10,

        // Batch 0, Channel 1, Height (2) x Width (2)
        2, 5,
        8, 11,

        // Batch 0, Channel 2, Height (2) x Width (2)
        3, 6,
        9, 12,
    });

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
7881
// BatchToSpaceNd, NCHW, quantized uint8: four single-element batches
// collapse into a single 2x2 spatial plane; element order is preserved.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 1, 2, 2};

    std::vector<uint8_t> input
    ({
        // Four batches of one element each.
        1, 2, 3, 4
    });

    std::vector<uint8_t> expectedOutput({1, 2, 3, 4});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
7904
// BatchToSpaceNd, NCHW, quantized uint8: same shapes as NchwUintTest1 but
// with a permuted input ordering, checking the per-channel interleave.
LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 3, 1, 1};
    const unsigned int outputShape[] = {1, 3, 2, 2};

    std::vector<uint8_t> input({ 1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12 });

    std::vector<uint8_t> expectedOutput
    ({
        // Batch 0, Channel 0, Height (2) x Width (2)
        1, 7,
        2, 8,

        // Batch 0, Channel 1, Height (2) x Width (2)
        3, 9,
        4, 10,

        // Batch 0, Channel 2, Height (2) x Width (2)
        5, 11,
        6, 12,
    });
    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
                                               armnn::DataLayout::NCHW, inputShape, input, blockShape,
                                               crops, outputShape, expectedOutput);
}
7935
//
// Debug layer: thin dispatch wrappers. Each function instantiates the
// corresponding templated Debug*Test<T> implementation for a concrete
// element type (float / uint8_t) and tensor rank (1D-4D).
//
LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<float>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
}