blob: bc8e13d1aecd3e35248411a0e0c30bf33f37926d [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#include "LayerTests.hpp"
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006#include "WorkloadTestUtils.hpp"
Nina Drozdd41b2592018-11-19 13:03:36 +00007#include "TensorUtils.hpp"
Aron Virginas-Tard4f0fea2019-04-09 14:08:06 +01008#include <ResolveType.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
10#include "test/TensorHelpers.hpp"
11#include "TensorCopyUtils.hpp"
surmeh013537c2c2018-05-18 16:31:43 +010012#include "Permute.hpp"
telsoa014fcda012018-03-09 14:13:49 +000013
14#include <boost/test/unit_test.hpp>
surmeh013537c2c2018-05-18 16:31:43 +010015#include <boost/assert.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
David Beck711fa312018-09-24 10:46:38 +010017#include <armnn/LayerSupport.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000019#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000020#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000021#include <backendsCommon/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000022
Éanna Ó Catháinde705582018-12-03 13:04:22 +000023#include <reference/workloads/RefWorkloads.hpp>
24
telsoa014fcda012018-03-09 14:13:49 +000025#include <algorithm>
26#include <boost/cast.hpp>
27
28#include "WorkloadTestUtils.hpp"
29#include "Conv2dTestImpl.hpp"
30#include "BatchNormTestImpl.hpp"
31#include "ActivationTestImpl.hpp"
32#include "Pooling2dTestImpl.hpp"
Nina Drozd8ed4b8c2019-05-29 10:41:04 +010033#include "FloorTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000034#include "FullyConnectedTestImpl.hpp"
narpra014951d842019-01-18 16:53:53 +000035#include "GatherTestImpl.hpp"
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +000036#include "SpaceToBatchNdTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000037#include "SplitterTestImpl.hpp"
38#include "SoftmaxTestImpl.hpp"
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +000039#include "StridedSliceTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000040#include "NormTestImpl.hpp"
41#include "PermuteTestImpl.hpp"
telsoa01c577f2c2018-08-31 09:22:23 +010042#include "LstmTestImpl.hpp"
43#include "ConvertFp16ToFp32TestImpl.hpp"
44#include "ConvertFp32ToFp16TestImpl.hpp"
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +000045#include "DebugTestImpl.hpp"
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +000046#include "DequantizeTestImpl.hpp"
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +010047#include "QuantizeTestImpl.hpp"
telsoa014fcda012018-03-09 14:13:49 +000048
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Flat CHW data for a {1, 3, 8, 16} tensor: channel 0 is 0.5 everywhere except
// an all-zero second row, channel 1 has a single vertical line of 1s in
// column 2, and channel 2 is constant -1.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
76
// 2-channel bias used by a number of Conv2d tests.
// The raw float values are quantized on demand by GetBias2 below.
static std::vector<float> Bias2({0, 2});
79
telsoa01c577f2c2018-08-31 09:22:23 +010080// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Mike Kelly9b398322019-05-22 17:21:49 +010082boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
telsoa014fcda012018-03-09 14:13:49 +000083{
84 if(biasEnabled)
85 {
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +000086 armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
Mike Kelly9b398322019-05-22 17:21:49 +010087 boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
telsoa014fcda012018-03-09 14:13:49 +000088 return bias;
89 }
90 else
91 {
92 return boost::multi_array<T, 1>();
93 }
94}
95
// Runs a 3x5-kernel convolution over the shared 3-channel 16x8 input and checks
// the result against precomputed reference values.
// qScale/qOffset quantize the input, kernel and expected output; biasEnabled
// selects whether the shared Bias2 vector is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch with 3-channel 3x5 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 2 batch elements of a 1-channel 14x4 image.
    armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Bias is quantized with qScale * qScale: the accumulator scale of a
    // quantized convolution is (input scale * weight scale), and both use
    // qScale here.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
179
// Runs a 3x3-kernel convolution over the shared 3-channel 16x8 input and checks
// the result against precomputed reference values.
// qScale/qOffset quantize the input, kernel and expected output; biasEnabled
// selects whether the shared Bias2 vector is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.

    // Use common single-batch 3-channel 16x8 image.
    armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));

    // Use a 2-element batch of 3-channel 3x3 kernels.
    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        })));

    // Expected output is 1 batch of a 2-channel 14x6 image.
    armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,

            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // Bias scale is qScale * qScale (input scale * weight scale).
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout);
}
256
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000257template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000258LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
259 armnn::IWorkloadFactory& workloadFactory,
260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
261 float qScale,
262 int32_t qOffset,
263 bool biasEnabled,
264 armnn::DataLayout dataLayout)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100265{
266 // Use common single-batch 5x5 image.
267
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000268 armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100269 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
270 {
271 1, 5, 2, 3,
272 8, 7, 3, 6,
273 3, 3, 9, 1
274 });
275
276
277 // Use a 2-element batch of 3-channel 3x3 kernels.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000278 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100279 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
280 4, 5, 6,
281 0, 0, 0,
282 3, 2, 1
283 });
284
285 // Expected output is 1 batch of a 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000286 armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100287
288 const std::vector<float> outputData =
289 {
290 23, 41, 33, 21,
291 44, 65, 76, 52,
292 82, 85, 79, 42
293 };
294
295 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
296
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000297 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
298 workloadFactory,
299 memoryManager,
300 input,
301 kernel,
302 boost::multi_array<T, 1>(),
303 expectedOutput,
304 dataLayout,
305 qScale,
306 qOffset);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100307}
308
// 3x3-kernel convolution with stride 2x2 and symmetric 1-pixel padding over a
// 5x5 single-channel input (tensors shaped for NHWC: {1, 5, 5, 1}).
// No bias is applied.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
        {
            1, 5, 2, 3, 5,
            8, 7, 3, 6, 3,
            3, 3, 9, 1, 9,
            4, 1, 8, 1, 3,
            6, 8, 1, 9, 2
        });

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
        {
            4, 5, 6,
            0, 0, 0,
            3, 2, 1
        });

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);

    const std::vector<T> outputData =
    {
        23, 33, 24,
        91, 99, 48,
        26, 50, 19
    };

    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);

    // Symmetric padding of 1 combined with stride 2 downsamples 5x5 to 3x3.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    // NOTE(review): biasEnabled is currently ignored — an empty bias tensor is
    // always passed to the implementation.
    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        boost::multi_array<T, 1>(),
        expectedOutput,
        dataLayout,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
374
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000375LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
376 armnn::IWorkloadFactory& workloadFactory,
377 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
378 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000379 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000381 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
382 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000383}
384
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000385LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
386 armnn::IWorkloadFactory& workloadFactory,
387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
388 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000389 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000390{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000391 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
392 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000393}
394
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000395LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
396 armnn::IWorkloadFactory& workloadFactory,
397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
398 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000399 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000400{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000401 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
402 workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000403}
404
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000405LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
406 armnn::IWorkloadFactory& workloadFactory,
407 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
408 bool biasEnabled)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100409{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000410 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
411 workloadFactory,
412 memoryManager,
413 0.f,
414 0,
415 biasEnabled,
416 armnn::DataLayout::NHWC);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100417}
418
Mike Kelly7332ed82018-12-20 17:03:06 +0000419LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
420 armnn::IWorkloadFactory& workloadFactory,
421 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
422 bool biasEnabled,
423 const armnn::DataLayout layout)
424{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000425 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
426 workloadFactory,
427 memoryManager,
428 0.f,
429 0,
430 biasEnabled,
431 layout);
Mike Kelly7332ed82018-12-20 17:03:06 +0000432}
433
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
435 armnn::IWorkloadFactory& workloadFactory,
436 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
437 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000438 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000439{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000440 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
441 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000442}
443
Mike Kelly2f80f6e2019-05-16 12:41:34 +0100444LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
445 armnn::IWorkloadFactory& workloadFactory,
446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
447 bool biasEnabled,
448 const armnn::DataLayout layout)
449{
450return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
451 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
452}
453
454LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
455 armnn::IWorkloadFactory& workloadFactory,
456 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
457 bool biasEnabled,
458 const armnn::DataLayout layout)
459{
460 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
461 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
462}
463
// Tests Convolution2d where the asymmetric padding (left=1, top=2, right=3,
// bottom=4) exceeds half the 2x2 kernel size on some edges, so several output
// rows/columns are produced entirely from padding. No bias is applied.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            11,21,31,
            12,22,32,
            13,23,33
        })));

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            -11,-21,
            -12,-22,
        })));

// Expected output is 1 batch of a 1-channel 6x8 image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            0, 0, 0, 0, 0, 0,
            -242, -594, -934, -372, 0, 0,
            -495, -1190, -1850, -725, 0, 0,
            -538, -1256, -1916, -748, 0, 0,
            -273, -626, -946, -363, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0
        })));

    // Bias deliberately disabled; scale argument follows the usual
    // qScale * qScale accumulator convention.
    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
527
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000528template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
529 typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000530LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
531 armnn::IWorkloadFactory& workloadFactory,
532 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000533 const armnn::DataLayout layout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000534 float qScale,
535 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000536{
telsoa01c577f2c2018-08-31 09:22:23 +0100537 // Use a single-batch 1-channel 5x5 image as input.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000538 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000539 boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
540 QuantizedVector<T>(qScale, qOffset, {
541 11,21,31,41,51,
542 12,22,32,42,52,
543 13,23,33,43,53,
544 14,24,34,44,54,
545 15,25,35,45,55,
546 })));
547
telsoa01c577f2c2018-08-31 09:22:23 +0100548 // Use 1 batch of a 1-channel 4x4 kernel.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000549 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000550 boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
551 QuantizedVector<T>(qScale, qOffset, {
552 -11,-21,-31,-41,
553 -12,-22,-32,-42,
554 -13,-23,-33,-43,
555 -14,-24,-34,-44,
556 })));
557
telsoa01c577f2c2018-08-31 09:22:23 +0100558 // Expected output is 1 batch of a 1-channel 5x5 image.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000559 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000560 std::vector<T> myVec(outputDesc.GetNumElements(), 0);
561 boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
562 QuantizedVector<T>(qScale, qOffset, {
telsoa014fcda012018-03-09 14:13:49 +0000563 -7140, -10580, -13940, -9300, -5230,
564 -9590, -14120, -18520, -12290, -6860,
565 -9980, -14560, -18960, -12560, -7000,
566 -7518, -10904, -14144, -9318, -5152,
surmeh013537c2c2018-05-18 16:31:43 +0100567 -5032, -7256, -9376, -6142, -3368,
telsoa014fcda012018-03-09 14:13:49 +0000568 })));
569
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000570 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
571 workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000572 memoryManager,
telsoa014fcda012018-03-09 14:13:49 +0000573 input,
574 kernel,
Mike Kelly9b398322019-05-22 17:21:49 +0100575 GetBias2<ArmnnBType>(false, qScale * qScale),
telsoa014fcda012018-03-09 14:13:49 +0000576 expectedOutput,
577 qScale,
578 qOffset,
narpra015f703182018-10-26 16:24:58 +0100579 layout,
telsoa01c577f2c2018-08-31 09:22:23 +0100580 1, // Padding left.
581 1, // Padding top.
582 2, // Padding right.
583 2); // Padding bottom.
surmeh013537c2c2018-05-18 16:31:43 +0100584}
585
// Tests DepthwiseConvolution2d with asymmetric padding (left=1, top=1,
// right=2, bottom=2), depth multiplier 1, stride 1x1, over a 2-channel 5x5
// input with a 2-channel 4x4 kernel.
// NOTE(review): the input/kernel/output data are quantized with each
// TensorInfo's own quantization scale/offset — which are never set here, so
// the TensorInfo defaults apply — rather than with the qScale/qOffset
// parameters (those are only forwarded to the impl). Presumably intentional
// since the reference output matches; confirm against the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Use a single-batch 2-channel 5x5 image as input.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 1, 2, 3, 4,
            5, 6, 7, 8, 9,
            10, 11, 12, 13, 14,
            15, 16, 17, 18, 19,
            20, 21, 22, 23, 24,

            25, 26, 27, 28, 29,
            30, 31, 32, 33, 34,
            35, 36, 37, 38, 39,
            40, 41, 42, 43, 44,
            45, 46, 47, 48, 49
        })));

    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Expected output is 1 batch of a 2-channel 5x5 image.
    // Calculated using the python tensorflow library with strideX=1, strideY=1.
    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1580, 1850, 1530, 1117,
            2140, 3108, 3500, 2842, 2042,
            3580, 5068, 5460, 4342, 3062,
            3618, 5072, 5390, 4248, 2971,
            3074, 4282, 4510, 3533, 2457,
            1550, 2284, 2362, 1955, 1428,
            2910, 4206, 4342, 3528, 2536,
            3390, 4886, 5022, 4068, 2916,
            3566, 5056, 5182, 4133, 2922,
            3100, 4352, 4452, 3517, 2465
        })));

    // Bias scale follows the qScale * qScale accumulator convention.
    return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
662
// Shared implementation for the NHWC-layout depthwise convolution tests.
// Builds a fixed 1x5x5x2 input, a 1x2x4x4 kernel and the matching
// pre-computed 1x5x5x2 expected output, then delegates to
// DepthwiseConvolution2dNhwcTestImpl with asymmetric padding
// (left 1, top 1, right 2, bottom 2) and a 1x1 stride.
// ArmnnType selects the tensor data type, ArmnnBType the bias data type;
// qScale/qOffset are forwarded to the impl (and used to derive the bias
// scale as qScale * qScale).
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: N=1, H=5, W=5, C=2 (NHWC). Channel 0 counts 0..24, channel 1
    // counts 25..49, interleaved per pixel.
    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 25,
            1, 26,
            2, 27,
            3, 28,
            4, 29,

            5, 30,
            6, 31,
            7, 32,
            8, 33,
            9, 34,

            10, 35,
            11, 36,
            12, 37,
            13, 38,
            14, 39,

            15, 40,
            16, 41,
            17, 42,
            18, 43,
            19, 44,

            20, 45,
            21, 46,
            22, 47,
            23, 48,
            24, 49
        })));

    // Kernel: 1x2x4x4, values descending 32..1 — one 4x4 filter per channel.
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            32, 31, 30, 29,
            28, 27, 26, 25,
            24, 23, 22, 21,
            20, 19, 18, 17,

            16, 15, 14, 13,
            12, 11, 10, 9,
            8, 7, 6, 5,
            4, 3, 2, 1
        })));

    // Pre-computed expected output for the padding/stride configuration
    // below, also in NHWC (two channel values per pixel).
    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            1062, 1550,
            1580, 2284,
            1850, 2362,
            1530, 1955,
            1117, 1428,

            2140, 2910,
            3108, 4206,
            3500, 4342,
            2842, 3528,
            2042, 2536,

            3580, 3390,
            5068, 4886,
            5460, 5022,
            4342, 4068,
            3062, 2916,

            3618, 3566,
            5072, 5056,
            5390, 5182,
            4248, 4133,
            2971, 2922,

            3074, 3100,
            4282, 4352,
            4510, 4452,
            3533, 3517,
            2457, 2465
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2, // Padding bottom.
        1, // strideX
        1); // strideY
}
770
// Shared implementation for the dilated (3x3) depthwise convolution NHWC
// test: a 9x9 single-channel input containing a central 3x3 block of ones is
// convolved with a 3x3 kernel, no padding, stride 1, dilation 3 in both
// dimensions. ArmnnType/ArmnnBType select the tensor and bias data types;
// qScale/qOffset are forwarded to the impl.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    // Input: N=1, H=9, W=9, C=1 — all zeros except a 3x3 block of ones in
    // the centre.
    armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0
        })));

    // 3x3 kernel with values 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        })));

    // No padding, unit stride, dilation 3 in both directions.
    uint32_t padLeft = 0;
    uint32_t padTop = 0;
    uint32_t padRight = 0;
    uint32_t padBottom = 0;
    uint32_t strideX = 1;
    uint32_t strideY = 1;
    uint32_t dilationX = 3;
    uint32_t dilationY = 3;

    // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
    armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType);
    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            5, 5, 5,
            5, 5, 5,
            5, 5, 5
        })));

    return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        qScale,
        qOffset,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);

}
839
telsoa014fcda012018-03-09 14:13:49 +0000840LayerTestResult<float, 4>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000841Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
842 armnn::IWorkloadFactory& workloadFactory,
843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000844 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000845{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000846 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
847 <armnn::DataType::Float32, armnn::DataType::Float32>(
848 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000849}
850
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000851LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
852 armnn::IWorkloadFactory& workloadFactory,
853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000854 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000855{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000856 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000857 workloadFactory, memoryManager, layout, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +0000858}
859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000860LayerTestResult<float, 4> DepthwiseConvolution2dTest(
861 armnn::IWorkloadFactory& workloadFactory,
862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
863 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000864 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000865{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000866 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000868}
869
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000870LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
871 armnn::IWorkloadFactory& workloadFactory,
872 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
873 bool biasEnabled)
Nikhil Rajcec6b652018-10-12 13:51:57 +0100874{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000875 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
876 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
Nikhil Rajcec6b652018-10-12 13:51:57 +0100877}
878
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000879LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
880 armnn::IWorkloadFactory& workloadFactory,
881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
882 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000883 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000884{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000885 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000886 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000887}
888
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000889LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
890 armnn::IWorkloadFactory& workloadFactory,
891 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
892 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000893 const armnn::DataLayout layout)
surmeh013537c2c2018-05-18 16:31:43 +0100894{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000895 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000896 workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
surmeh013537c2c2018-05-18 16:31:43 +0100897}
898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000899LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
900 armnn::IWorkloadFactory& workloadFactory,
901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
902 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000903 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000904{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000905 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000906 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000907}
908
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000909LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
910 armnn::IWorkloadFactory& workloadFactory,
911 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
912 bool biasEnabled,
Matthew Bentham8800c002018-11-19 13:19:28 +0000913 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000914{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000915 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000916 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
telsoa014fcda012018-03-09 14:13:49 +0000917}
918
Bruno Goncalves22972f02019-04-26 21:03:24 -0300919LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
920 armnn::IWorkloadFactory& workloadFactory,
921 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
922{
923 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
924 workloadFactory,
925 memoryManager,
926 0.f,
927 0,
928 false);
929}
930
Ruomei Yan88d44b82019-05-23 14:29:06 +0100931LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
932 armnn::IWorkloadFactory& workloadFactory,
933 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
934 bool biasEnabled,
935 const armnn::DataLayout layout)
936{
937 return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
938 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
939}
940
941LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
942 armnn::IWorkloadFactory& workloadFactory,
943 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
944 bool biasEnabled,
945 const armnn::DataLayout layout)
946{
947 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>(
948 workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
949}
950
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000951LayerTestResult<float, 4> Convolution1dTest(
952 armnn::IWorkloadFactory& workloadFactory,
953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
954 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000955{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000956 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
957 workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000958}
959
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000960LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
961 armnn::IWorkloadFactory& workloadFactory,
962 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
963 bool biasEnabled)
telsoa014fcda012018-03-09 14:13:49 +0000964{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000965 return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
966 workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
telsoa014fcda012018-03-09 14:13:49 +0000967}
968
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000969LayerTestResult<float,4> CompareConvolution2dTest(
970 armnn::IWorkloadFactory& workloadFactory,
971 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
972 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +0000973{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000974 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
975 workloadFactory, memoryManager, refWorkloadFactory);
telsoa014fcda012018-03-09 14:13:49 +0000976}
977
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000978LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000979 armnn::IWorkloadFactory& workloadFactory,
980 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
981 armnn::IWorkloadFactory& refWorkloadFactory,
Matthew Bentham8800c002018-11-19 13:19:28 +0000982 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +0000983{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000984 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
985 workloadFactory, memoryManager, refWorkloadFactory, layout);
telsoa014fcda012018-03-09 14:13:49 +0000986}
987
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000988LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
989 armnn::IWorkloadFactory& workloadFactory,
990 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
991 armnn::IWorkloadFactory& refWorkloadFactory,
992 const armnn::DataLayout layout)
993{
994 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
995 workloadFactory, memoryManager, refWorkloadFactory, layout);
996}
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000997
998LayerTestResult<float,4> SimpleNormalizationAcrossTest(
999 armnn::IWorkloadFactory& workloadFactory,
1000 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001001{
1002 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1003 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001004 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001005}
1006
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001007LayerTestResult<float,4> SimpleNormalizationWithinTest(
1008 armnn::IWorkloadFactory& workloadFactory,
1009 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001010{
1011 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1012 auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001013 return SimpleNormalizationTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001014}
1015
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001016LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(
1017 armnn::IWorkloadFactory& workloadFactory,
1018 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra0155a97bc2018-10-02 14:35:53 +01001019{
1020 auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1021 auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001022 return SimpleNormalizationNhwcTestImpl(workloadFactory, memoryManager, normChannel, normMethod);
narpra0155a97bc2018-10-02 14:35:53 +01001023}
1024
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001025LayerTestResult<float,2> SimpleSoftmaxTest(
1026 armnn::IWorkloadFactory& workloadFactory,
1027 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1028 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001029{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001030 return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001031}
1032
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001033LayerTestResult<float,3> Simple3dSoftmaxTest(
1034 armnn::IWorkloadFactory& workloadFactory,
1035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1036 float beta)
1037{
1038 return Simple3dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1039}
1040
1041LayerTestResult<float,4> Simple4dSoftmaxTest(
1042 armnn::IWorkloadFactory& workloadFactory,
1043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1044 float beta)
1045{
1046 return Simple4dSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
1047}
1048
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001049LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
1050 armnn::IWorkloadFactory& workloadFactory,
1051 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1052 float beta)
telsoa014fcda012018-03-09 14:13:49 +00001053{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001054 return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
telsoa014fcda012018-03-09 14:13:49 +00001055}
1056
Narumol Prangnawarat65d30962019-03-14 11:55:03 +00001057LayerTestResult<uint8_t,3> Simple3dSoftmaxUint8Test(
1058 armnn::IWorkloadFactory& workloadFactory,
1059 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1060 float beta)
1061{
1062 return Simple3dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1063}
1064
1065LayerTestResult<uint8_t,4> Simple4dSoftmaxUint8Test(
1066 armnn::IWorkloadFactory& workloadFactory,
1067 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1068 float beta)
1069{
1070 return Simple4dSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
1071}
1072
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001073LayerTestResult<float,4> CompareNormalizationTest(
1074 armnn::IWorkloadFactory& workloadFactory,
1075 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1076 armnn::IWorkloadFactory& refWorkloadFactory,
1077 armnn::NormalizationAlgorithmChannel normChannel,
1078 armnn::NormalizationAlgorithmMethod normMethod)
telsoa014fcda012018-03-09 14:13:49 +00001079{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001080 return CompareNormalizationTestImpl(workloadFactory, memoryManager, refWorkloadFactory, normChannel, normMethod);
telsoa014fcda012018-03-09 14:13:49 +00001081}
1082
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001083LayerTestResult<float,2> CompareSoftmaxTest(
1084 armnn::IWorkloadFactory& workloadFactory,
1085 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001086 armnn::IWorkloadFactory& refWorkloadFactory,
1087 float beta)
1088{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001089 return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
1090 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001091}
1092
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001093LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
1094 armnn::IWorkloadFactory& workloadFactory,
1095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001096 armnn::IWorkloadFactory& refWorkloadFactory,
1097 float beta)
1098{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001099 return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
1100 workloadFactory, memoryManager, refWorkloadFactory, beta);
telsoa014fcda012018-03-09 14:13:49 +00001101}
1102
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001103std::vector<LayerTestResult<float,3>> SplitterTest(
1104 armnn::IWorkloadFactory& workloadFactory,
1105 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001106{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001107 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00001108}
1109
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001110std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
1111 armnn::IWorkloadFactory& workloadFactory,
1112 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001113{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001114 return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001115}
1116
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001117LayerTestResult<float, 3> CopyViaSplitterTest(
1118 armnn::IWorkloadFactory& workloadFactory,
1119 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001120{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001121 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001122}
1123
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001124LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
1125 armnn::IWorkloadFactory& workloadFactory,
1126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001127{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001128 return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001129}
1130
telsoa01c577f2c2018-08-31 09:22:23 +01001131LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001132 armnn::IWorkloadFactory& workloadFactory,
1133 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001134{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001135 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001136 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1137 { 2., 3., 3., 4. }));
1138
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001139 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001140 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1141 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1142 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
Conor Kennedyb9971c92019-05-07 07:14:23 +01001143 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001144 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001145}
1146
// Float32 LSTM test: no CIFG, peephole enabled, projection enabled.
// Feeds a 2x5 input batch and checks against the fixed 2x16 expected output.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Input batch: two sequences of five values each.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // Expected 2x16 output values.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1168
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001169LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1170 armnn::IWorkloadFactory& workloadFactory,
1171 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +01001172{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001173 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001174 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1175 {2., 3., 3., 4.}));
1176
1177
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001178 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
telsoa01c577f2c2018-08-31 09:22:23 +01001179 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1180 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1181 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1182
Conor Kennedyb9971c92019-05-07 07:14:23 +01001183 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001184 workloadFactory, memoryManager, input, expectedOutput);
telsoa01c577f2c2018-08-31 09:22:23 +01001185}
1186
// QuantisedSymm16 LSTM test: no CIFG, no peephole, no projection.
// Uses identity quantization (scale 1, offset 0) and QuantisedAsymm8 for the
// constant tensors (forwarded to the impl as constantDatatype). The float
// reference values match the Float32 variant of this test.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // 2x2 input batch, quantized with the scale/offset above.
    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>{2., 3., 3., 4.}));

    // Expected 2x4 output, quantized with the same parameters.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);

}
1210
// QuantisedSymm16 LSTM test: CIFG enabled, peephole enabled, no projection.
// Uses identity quantization (scale 1, offset 0) and QuantisedAsymm8 for the
// constant tensors. The float reference values match the Float32 variant of
// this test.
LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // 2x2 input batch, quantized with the scale/offset above.
    armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
        std::vector<float>({ 2., 3., 3., 4. })));

    // Expected 2x4 output, quantized with the same parameters.
    armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));

    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1234
// QuantisedSymm16 LSTM test: no CIFG, peephole enabled, projection enabled.
// Uses scale 2, offset 0 and QuantisedAsymm8 for the constant tensors.
// The float reference values match the Float32 variant of this test.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // 2x5 input batch, quantized with the scale/offset above.
    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));

    // Expected 2x16 output, quantized with the same parameters.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>(
            {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
             -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
             -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
             0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
             -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
             0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
1264
// QuantisedSymm16 LSTM test: no CIFG, no peephole, no projection — here the
// constant tensors also use QuantisedSymm16 (unlike the variant above, which
// uses QuantisedAsymm8 constants). Identity quantization (scale 1, offset 0).
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const float qScale = 1.0f;
    const int32_t qOffset = 0;

    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16

    // 2x2 input batch, quantized with the scale/offset above.
    armnn::TensorInfo inputDesc({2, 2}, datatype);
    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>{2., 3., 3., 4.}));

    // Expected 2x4 output, quantized with the same parameters.
    armnn::TensorInfo outputDesc({2, 4}, datatype);
    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
        qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
                                      -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));

    // Pass 'datatype' as the constant data type too, so weights are QSymm16.
    return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
}
1286
Jim Flynn4ed6c832019-05-20 11:02:46 +01001287LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001288 armnn::IWorkloadFactory& workloadFactory,
1289 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001290{
surmeh013537c2c2018-05-18 16:31:43 +01001291 unsigned int outputWidth = 3;
telsoa014fcda012018-03-09 14:13:49 +00001292 unsigned int outputHeight = 6;
1293 unsigned int outputChannels = 3;
1294
surmeh013537c2c2018-05-18 16:31:43 +01001295 unsigned int inputWidth1 = 3;
1296 unsigned int inputHeight1 = 6;
1297 unsigned int inputChannels1 = 2;
telsoa014fcda012018-03-09 14:13:49 +00001298
surmeh013537c2c2018-05-18 16:31:43 +01001299 unsigned int inputWidth2 = 3;
1300 unsigned int inputHeight2 = 6;
1301 unsigned int inputChannels2 = 1;
telsoa014fcda012018-03-09 14:13:49 +00001302
telsoa01c577f2c2018-08-31 09:22:23 +01001303 // Define the tensor descriptors.
telsoa014fcda012018-03-09 14:13:49 +00001304 armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
1305 armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
1306 armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00001307
1308 LayerTestResult<float,3> ret(outputTensorInfo);
1309
telsoa014fcda012018-03-09 14:13:49 +00001310 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
surmeh013537c2c2018-05-18 16:31:43 +01001311 {
1312 1.0f, 2.0f, 3.0f,
1313 4.0f, 5.0f, 6.0f,
1314 7.0f, 8.0f, 9.0f,
1315 10.0f, 11.0f, 12.0f,
1316 13.0f, 14.0f, 15.0f,
1317 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001318
surmeh013537c2c2018-05-18 16:31:43 +01001319 19.0f, 20.0f, 21.0f,
1320 22.0f, 23.0f, 24.0f,
1321 25.0f, 26.0f, 27.0f,
1322 28.0f, 29.0f, 30.0f,
1323 31.0f, 32.0f, 33.0f,
1324 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001325
surmeh013537c2c2018-05-18 16:31:43 +01001326 37.0f, 38.0f, 39.0f,
1327 40.0f, 41.0f, 42.0f,
1328 43.0f, 44.0f, 45.0f,
1329 46.0f, 47.0f, 48.0f,
1330 49.0f, 50.0f, 51.0f,
1331 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001332 })
1333 );
1334
telsoa014fcda012018-03-09 14:13:49 +00001335 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
1336 {
surmeh013537c2c2018-05-18 16:31:43 +01001337 1.0f, 2.0f, 3.0f,
1338 4.0f, 5.0f, 6.0f,
1339 7.0f, 8.0f, 9.0f,
1340 10.0f, 11.0f, 12.0f,
1341 13.0f, 14.0f, 15.0f,
1342 16.0f, 17.0f, 18.0f,
telsoa014fcda012018-03-09 14:13:49 +00001343
surmeh013537c2c2018-05-18 16:31:43 +01001344 19.0f, 20.0f, 21.0f,
1345 22.0f, 23.0f, 24.0f,
1346 25.0f, 26.0f, 27.0f,
1347 28.0f, 29.0f, 30.0f,
1348 31.0f, 32.0f, 33.0f,
1349 34.0f, 35.0f, 36.0f,
telsoa014fcda012018-03-09 14:13:49 +00001350 })
1351 );
1352
1353 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
1354 {
surmeh013537c2c2018-05-18 16:31:43 +01001355 37.0f, 38.0f, 39.0f,
1356 40.0f, 41.0f, 42.0f,
telsoa014fcda012018-03-09 14:13:49 +00001357 43.0f, 44.0f, 45.0f,
surmeh013537c2c2018-05-18 16:31:43 +01001358 46.0f, 47.0f, 48.0f,
1359 49.0f, 50.0f, 51.0f,
1360 52.0f, 53.0f, 54.0f,
telsoa014fcda012018-03-09 14:13:49 +00001361 })
1362 );
1363
telsoa01c577f2c2018-08-31 09:22:23 +01001364 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Jim Flynne242f2d2019-05-22 14:24:13 +01001365 armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
telsoa014fcda012018-03-09 14:13:49 +00001366
telsoa01c577f2c2018-08-31 09:22:23 +01001367 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Jim Flynne242f2d2019-05-22 14:24:13 +01001368 armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
telsoa014fcda012018-03-09 14:13:49 +00001369
telsoa014fcda012018-03-09 14:13:49 +00001370 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1371
1372 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
1373
1374 std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
1375 subTensorsSupported ?
1376 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
1377 workloadFactory.CreateTensorHandle(inputTensorInfo1);
1378
1379 std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
1380 subTensorsSupported ?
1381 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
1382 workloadFactory.CreateTensorHandle(inputTensorInfo2);
1383
Jim Flynne242f2d2019-05-22 14:24:13 +01001384 armnn::ConcatQueueDescriptor data;
telsoa014fcda012018-03-09 14:13:49 +00001385 armnn::WorkloadInfo info;
1386 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1387 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
telsoa014fcda012018-03-09 14:13:49 +00001388 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1389
1390 data.m_ViewOrigins.push_back(window1);
1391 data.m_ViewOrigins.push_back(window2);
telsoa014fcda012018-03-09 14:13:49 +00001392
Jim Flynn4ed6c832019-05-20 11:02:46 +01001393 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
telsoa014fcda012018-03-09 14:13:49 +00001394
1395 inputHandle1->Allocate();
1396 inputHandle2->Allocate();
telsoa014fcda012018-03-09 14:13:49 +00001397 outputHandle->Allocate();
1398
1399 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
1400 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
telsoa014fcda012018-03-09 14:13:49 +00001401
Derek Lambertif30f7d32019-04-09 10:25:02 +01001402 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001403 workload->Execute();
1404
1405 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
1406
1407 return ret;
1408}
1409
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001410LayerTestResult<float,4> AdditionTest(
1411 armnn::IWorkloadFactory& workloadFactory,
1412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001413{
1414 unsigned int batchSize = 2;
1415 unsigned int channels = 2;
1416 unsigned int height = 2;
1417 unsigned int width = 3;
1418
1419 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1420 armnn::TensorInfo outputTensorInfo;
1421
1422 unsigned int shape[] = {batchSize, channels, height, width};
1423
1424 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1425 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1426 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1427
1428
1429 auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
1430 {
1431 0.0f, 2.0f, 1.0f,
1432 0.2f, 1.0f, 2.0f,
1433
1434 1.0f, 2.0f, 1.0f,
1435 0.2f, 1.0f, 2.0f,
1436
1437 0.0f, 2.0f, 1.0f,
1438 4.2f, 1.0f, 2.0f,
1439
1440 0.0f, 0.0f, 1.0f,
1441 0.2f, 1.0f, 2.0f,
1442 }));
1443
1444 auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
1445 {
1446 1.0f, 2.0f, 1.0f,
1447 0.0f, 1.0f, 2.0f,
1448
1449 1.0f, 2.0f, -2.0f,
1450 0.2f, 1.0f, 2.0f,
1451
1452 0.0f, 2.0f, 1.0f,
1453 4.2f, 0.0f, -3.0f,
1454
1455 0.0f, 0.0f, 1.0f,
1456 0.7f, 1.0f, 5.0f,
1457 }));
1458
1459 LayerTestResult<float,4> ret(outputTensorInfo);
1460 ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
1461 {
1462 1.0f, 4.0f, 2.0f,
1463 0.2f, 2.0f, 4.0f,
1464
1465 2.0f, 4.0f, -1.0f,
1466 0.4f, 2.0f, 4.0f,
1467
1468 0.0f, 4.0f, 2.0f,
1469 8.4f, 1.0f, -1.0f,
1470
1471 0.0f, 0.0f, 2.0f,
1472 0.9f, 2.0f, 7.0f,
1473 }));
1474
1475 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1476 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1477 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1478
1479 armnn::AdditionQueueDescriptor data;
1480 armnn::WorkloadInfo info;
1481 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1482 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1483 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1484
1485 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1486
1487 inputHandle1->Allocate();
1488 inputHandle2->Allocate();
1489 outputHandle->Allocate();
1490
1491 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1492 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1493
Derek Lambertif30f7d32019-04-09 10:25:02 +01001494 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001495 workload->Execute();
1496
1497 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1498
1499 return ret;
1500}
1501
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001502template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001503LayerTestResult<T, 4> AdditionBroadcastTestImpl(
1504 armnn::IWorkloadFactory& workloadFactory,
1505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001506 float qScale,
1507 int32_t qOffset)
1508{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001509 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
1510 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
1511 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001512
1513 if (armnn::IsQuantizedType<T>())
1514 {
1515 inputTensorInfo1.SetQuantizationScale(qScale);
1516 inputTensorInfo1.SetQuantizationOffset(qOffset);
1517 inputTensorInfo2.SetQuantizationScale(qScale);
1518 inputTensorInfo2.SetQuantizationOffset(qOffset);
1519 outputTensorInfo.SetQuantizationScale(qScale);
1520 outputTensorInfo.SetQuantizationOffset(qOffset);
1521 }
1522
1523 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1524 {
1525 0.0f,
1526 1.0f,
1527
1528 2.0f,
1529 3.0f,
1530
1531 4.0f,
1532 5.0f,
1533 }));
1534
1535 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1536 {
1537 0.5f, 1.5f, 2.5f,
1538 3.5f, 4.5f, 5.5f,
1539 }));
1540
1541 LayerTestResult<T,4> ret(outputTensorInfo);
1542 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1543 {
1544 0.5f, 1.5f, 2.5f,
1545 4.5f, 5.5f, 6.5f,
1546
1547 2.5f, 3.5f, 4.5f,
1548 6.5f, 7.5f, 8.5f,
1549
1550 4.5f, 5.5f, 6.5f,
1551 8.5f, 9.5f, 10.5f,
1552 }));
1553
1554 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1555 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1556 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1557
1558 armnn::AdditionQueueDescriptor data;
1559 armnn::WorkloadInfo info;
1560 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1561 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1562 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1563
1564 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1565
1566 inputHandle1->Allocate();
1567 inputHandle2->Allocate();
1568 outputHandle->Allocate();
1569
1570 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1571 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1572
Derek Lambertif30f7d32019-04-09 10:25:02 +01001573 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001574 workload->Execute();
1575
1576 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1577
1578 return ret;
1579}
1580
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001581template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001582LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
1583 armnn::IWorkloadFactory& workloadFactory,
1584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00001585 float qScale,
1586 int32_t qOffset)
1587{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001588 armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
1589 armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
1590 armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001591
1592 if (armnn::IsQuantizedType<T>())
1593 {
1594 inputTensorInfo1.SetQuantizationScale(qScale);
1595 inputTensorInfo1.SetQuantizationOffset(qOffset);
1596 inputTensorInfo2.SetQuantizationScale(qScale);
1597 inputTensorInfo2.SetQuantizationOffset(qOffset);
1598 outputTensorInfo.SetQuantizationScale(qScale);
1599 outputTensorInfo.SetQuantizationOffset(qOffset);
1600 }
1601
1602 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
1603 {
1604 0.0f, 1.0f, 2.0f,
1605 3.0f, 4.0f, 5.0f,
1606 6.0f, 7.0f, 8.0f,
1607 9.0f, 10.0f, 11.0f,
1608 12.0f, 13.0f, 14.0f,
1609 15.0f, 16.0f, 17.0f,
1610 }));
1611
1612 auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
1613 {
1614 0.5f,
1615 }));
1616
1617 LayerTestResult<T,4> ret(outputTensorInfo);
1618 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
1619 {
1620 0.5f, 1.5f, 2.5f,
1621 3.5f, 4.5f, 5.5f,
1622 6.5f, 7.5f, 8.5f,
1623 9.5f, 10.5f, 11.5f,
1624 12.5f, 13.5f, 14.5f,
1625 15.5f, 16.5f, 17.5f,
1626 }));
1627
1628 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1629 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1630 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1631
1632 armnn::AdditionQueueDescriptor data;
1633 armnn::WorkloadInfo info;
1634 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1635 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1636 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1637
1638 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1639
1640 inputHandle1->Allocate();
1641 inputHandle2->Allocate();
1642 outputHandle->Allocate();
1643
1644 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1645 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1646
Derek Lambertif30f7d32019-04-09 10:25:02 +01001647 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001648 workload->Execute();
1649
1650 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1651
1652 return ret;
1653}
1654
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001655LayerTestResult<float, 4> AdditionBroadcastTest(
1656 armnn::IWorkloadFactory& workloadFactory,
1657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001658{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001659 return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
1660 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001661}
1662
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001663LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
1664 armnn::IWorkloadFactory& workloadFactory,
1665 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001666{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001667 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
1668 workloadFactory, memoryManager, 2.f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001669}
1670
Sadik Armagan2999a022019-04-09 14:20:12 +01001671LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
1672 armnn::IWorkloadFactory& workloadFactory,
1673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1674{
1675 return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
1676 workloadFactory, memoryManager, 2.f, 0);
1677}
1678
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001679LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
1680 armnn::IWorkloadFactory& workloadFactory,
1681 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001682{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001683 return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
1684 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00001685}
1686
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001687LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
1688 armnn::IWorkloadFactory& workloadFactory,
1689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00001690{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001691 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
1692 workloadFactory, memoryManager, 0.1333333f, 128);
telsoa014fcda012018-03-09 14:13:49 +00001693}
1694
Sadik Armagan2999a022019-04-09 14:20:12 +01001695LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
1696 armnn::IWorkloadFactory& workloadFactory,
1697 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1698{
1699 return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
1700 workloadFactory, memoryManager, 0.1333333f, 0);
1701}
1702
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001703LayerTestResult<float,4> CompareAdditionTest(
1704 armnn::IWorkloadFactory& workloadFactory,
1705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1706 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001707{
1708 unsigned int batchSize = 4;
1709 unsigned int channels = 1;
1710 unsigned int height = 2;
1711 unsigned int width = 3;
1712
1713 armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
1714 armnn::TensorInfo outputTensorInfo;
1715
1716 unsigned int shape[] = {batchSize, channels, height, width};
1717
1718 inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1719 inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1720 outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
1721
1722 auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
1723 auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
1724
1725 LayerTestResult<float,4> ret(outputTensorInfo);
1726
1727 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1728 std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
1729 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1730
1731 std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
1732 std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
1733 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1734
1735 armnn::AdditionQueueDescriptor data;
1736 armnn::WorkloadInfo info;
1737 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1738 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
1739 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1740
1741 armnn::AdditionQueueDescriptor refData = data;
1742 armnn::WorkloadInfo refInfo = info;
1743 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
1744 SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
1745 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1746
1747 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
1748 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
1749
1750 inputHandle1->Allocate();
1751 inputHandle2->Allocate();
1752 outputHandle->Allocate();
1753 inputHandle1Ref->Allocate();
1754 inputHandle2Ref->Allocate();
1755 outputHandleRef->Allocate();
1756
1757 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1758 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
1759 CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
1760 CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
1761
Derek Lambertif30f7d32019-04-09 10:25:02 +01001762 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001763 workload->Execute();
Derek Lambertif30f7d32019-04-09 10:25:02 +01001764 workloadRef->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00001765 workloadRef->Execute();
1766
1767 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1768 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1769
1770 return ret;
1771}
1772
surmeh01bceff2f2018-03-29 16:29:27 +01001773namespace {
Sadik Armagan2999a022019-04-09 14:20:12 +01001774template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001775LayerTestResult<T, 4> DivisionTestHelper(
1776 armnn::IWorkloadFactory& workloadFactory,
1777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1778 const unsigned int shape0[4],
1779 const std::vector<T>& values0,
1780 float scale0,
1781 int32_t offset0,
1782 const unsigned int shape1[4],
1783 const std::vector<T> & values1,
1784 float scale1,
1785 int32_t offset1,
1786 const unsigned int outShape[4],
1787 const std::vector<T> & outValues,
1788 float outScale,
1789 int32_t outOffset)
David Beck5cd01f32018-09-12 16:00:08 +01001790{
Sadik Armagan2999a022019-04-09 14:20:12 +01001791 armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
1792 armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
1793 armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001794
David Beck5cd01f32018-09-12 16:00:08 +01001795 inputTensorInfo0.SetQuantizationScale(scale0);
1796 inputTensorInfo0.SetQuantizationOffset(offset0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001797
David Beck5cd01f32018-09-12 16:00:08 +01001798 inputTensorInfo1.SetQuantizationScale(scale1);
1799 inputTensorInfo1.SetQuantizationOffset(offset1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001800
David Beck5cd01f32018-09-12 16:00:08 +01001801 outputTensorInfo.SetQuantizationScale(outScale);
1802 outputTensorInfo.SetQuantizationOffset(outOffset);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001803
David Beck5cd01f32018-09-12 16:00:08 +01001804 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
1805 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001806
David Beck5cd01f32018-09-12 16:00:08 +01001807 LayerTestResult<T, 4> result(outputTensorInfo);
1808 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001809
David Beck5cd01f32018-09-12 16:00:08 +01001810 std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
1811 std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
1812 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001813
David Beck5cd01f32018-09-12 16:00:08 +01001814 armnn::DivisionQueueDescriptor data;
1815 armnn::WorkloadInfo info;
1816 AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
1817 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
1818 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001819
David Beck5cd01f32018-09-12 16:00:08 +01001820 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001821
David Beck5cd01f32018-09-12 16:00:08 +01001822 inputHandle0->Allocate();
1823 inputHandle1->Allocate();
1824 outputHandle->Allocate();
1825
1826 CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
1827 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
1828
Derek Lambertif30f7d32019-04-09 10:25:02 +01001829 workload->PostAllocationConfigure();
David Beck5cd01f32018-09-12 16:00:08 +01001830 workload->Execute();
1831
1832 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
1833
1834 return result;
1835}
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001836} // anonymous namespace
1837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001838LayerTestResult<float,4> DivisionByZeroTest(
1839 armnn::IWorkloadFactory& workloadFactory,
1840 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001841{
1842 const unsigned int width = 2;
1843 const unsigned int height = 2;
1844 const unsigned int channelCount = 2;
1845 const unsigned int batchSize = 2;
1846
1847 unsigned int shape[] = { batchSize, channelCount, height, width };
1848
1849 std::vector<float> input0({
1850 1.f, 1.f, 1.f, 1.f, 0.f, 0.f, 0.f, 0.f,
1851 -1.f, -1.f, -1.f, -1.f, 5.f, 5.f, 5.f, 5.f });
1852
1853 std::vector<float> input1({
1854 0.f, 0.f, -0.f, -0.f, 0.f, 0.f, -0.f, -0.f,
1855 0.f, 0.f, -0.f, -0.f, 5.f, 5.f, 5.f, 5.f });
1856
1857 std::vector<float> output({
1858 INFINITY, INFINITY, -INFINITY, -INFINITY, NAN, NAN, -NAN, -NAN,
1859 -INFINITY, -INFINITY, INFINITY, INFINITY, 1, 1, 1, 1 });
1860
Sadik Armagan2999a022019-04-09 14:20:12 +01001861 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1862 memoryManager,
1863 shape, input0, 1.0f, 0,
1864 shape, input1, 1.0f, 0,
1865 shape, output, 1.0f, 0);
Francis Murtagh8c5e3dc2018-08-30 17:18:37 +01001866}
1867
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001868LayerTestResult<float,4> DivisionTest(
1869 armnn::IWorkloadFactory& workloadFactory,
1870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001871{
1872 const unsigned int width = 2;
1873 const unsigned int height = 2;
1874 const unsigned int channelCount = 2;
1875 const unsigned int batchSize = 2;
1876
1877 unsigned int shape[] = { batchSize, channelCount, height, width };
1878
1879 std::vector<float> input0({
1880 2, 2, 2, 2, 3, 3, 3, 3,
1881 4, 4, 4, 4, 5, 5, 5, 5 });
1882
1883 std::vector<float> input1({
1884 1, 1, 1, 1, 2, 2, 2, 2,
1885 4, 4, 4, 4, 4, 4, 4, 4 });
1886
1887 std::vector<float> output({
1888 2, 2, 2, 2, 1.5, 1.5, 1.5, 1.5,
1889 1, 1, 1, 1, 1.25, 1.25, 1.25, 1.25 });
1890
David Beck5cd01f32018-09-12 16:00:08 +01001891
Sadik Armagan2999a022019-04-09 14:20:12 +01001892 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1893 memoryManager,
1894 shape, input0, 1.0f, 0,
1895 shape, input1, 1.0f, 0,
1896 shape, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001897}
1898
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001899LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
1900 armnn::IWorkloadFactory& workloadFactory,
1901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001902{
1903 unsigned int shape0[] = { 1, 2, 2, 2 };
1904 std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1905
1906 unsigned int shape1[] = { 1, 1, 1, 1 };
1907 std::vector<float> input1({ 2 });
1908
1909 std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1910
David Beck5cd01f32018-09-12 16:00:08 +01001911
Sadik Armagan2999a022019-04-09 14:20:12 +01001912 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1913 memoryManager,
1914 shape0, input0, 1.0f, 0,
1915 shape1, input1, 1.0f, 0,
1916 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001917}
1918
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001919LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
1920 armnn::IWorkloadFactory& workloadFactory,
1921 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001922{
1923 unsigned int shape0[] = { 1, 3, 3, 2 };
1924 std::vector<float> input0({
1925 1, 4, 3, 8, 5, 12,
1926 7, 16, 9, 20, 11, 24,
1927 13, 28, 15, 32, 17, 36});
1928
1929 unsigned int shape1[] = { 1, 1, 1, 2 };
1930 std::vector<float> input1({ 1, 2 });
1931
1932 std::vector<float> output({
1933 1, 2, 3, 4, 5, 6,
1934 7, 8, 9, 10, 11, 12,
1935 13, 14, 15, 16, 17, 18});
1936
Sadik Armagan2999a022019-04-09 14:20:12 +01001937 return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
1938 memoryManager,
1939 shape0, input0, 1.0f, 0,
1940 shape1, input1, 1.0f, 0,
1941 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001942}
1943
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001944LayerTestResult<uint8_t,4> DivisionUint8Test(
1945 armnn::IWorkloadFactory& workloadFactory,
1946 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001947{
1948 const unsigned int width = 2;
1949 const unsigned int height = 2;
1950 const unsigned int channelCount = 2;
1951 const unsigned int batchSize = 2;
1952
1953 unsigned int shape[] = { batchSize, channelCount, height, width };
1954
1955 std::vector<uint8_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
1956 4, 4, 4, 4, 5, 5, 5, 5 });
1957
1958 std::vector<uint8_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
1959 4, 4, 4, 4, 4, 4, 4, 4 });
1960
1961 std::vector<uint8_t> output({8, 8, 8, 8, 6, 6, 6, 6,
1962 4, 4, 4, 4, 5, 5, 5, 5});
1963
1964
Sadik Armagan2999a022019-04-09 14:20:12 +01001965 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1966 memoryManager,
1967 shape, input0, 1.0f, 0,
1968 shape, input1, 1.0f, 0,
1969 shape, output, 0.25f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001970}
1971
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001972LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
1973 armnn::IWorkloadFactory& workloadFactory,
1974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001975{
1976 unsigned int shape0[] = { 1, 2, 2, 2 };
1977 std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
1978
1979 unsigned int shape1[] = { 1, 1, 1, 1 };
1980 std::vector<uint8_t> input1({ 2 });
1981
1982 std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
1983
Sadik Armagan2999a022019-04-09 14:20:12 +01001984 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
1985 memoryManager,
1986 shape0, input0, 1.0f, 0,
1987 shape1, input1, 1.0f, 0,
1988 shape0, output, 1.0f, 0);
David Beck5cd01f32018-09-12 16:00:08 +01001989}
1990
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001991LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
1992 armnn::IWorkloadFactory& workloadFactory,
1993 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beck5cd01f32018-09-12 16:00:08 +01001994{
1995 unsigned int shape0[] = { 1, 3, 3, 2 };
1996 std::vector<uint8_t> input0({1, 4, 3, 8, 5, 12,
1997 7, 16, 9, 20, 11, 24,
1998 13, 28, 15, 32, 17, 36});
1999
2000 unsigned int shape1[] = { 1, 1, 1, 2 };
2001 std::vector<uint8_t> input1({ 1, 2 });
2002
2003 std::vector<uint8_t> output({1, 2, 3, 4, 5, 6,
2004 7, 8, 9, 10, 11, 12,
2005 13, 14, 15, 16, 17, 18});
2006
Sadik Armagan2999a022019-04-09 14:20:12 +01002007 return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
2008 memoryManager,
2009 shape0, input0, 1.0f, 0,
2010 shape1, input1, 1.0f, 0,
2011 shape0, output, 1.0f, 0);
2012}
2013
2014LayerTestResult<int16_t,4> DivisionInt16Test(
2015 armnn::IWorkloadFactory& workloadFactory,
2016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2017{
2018 unsigned int shape[] = { 2, 2, 2, 2 };
2019
2020 std::vector<int16_t> input0({2, 2, 2, 2, 3, 3, 3, 3,
2021 4, 4, 4, 4, 5, 5, 5, 5 });
2022
2023 std::vector<int16_t> input1({1, 1, 1, 1, 2, 2, 2, 2,
2024 4, 4, 4, 4, 4, 4, 4, 4 });
2025
2026 std::vector<int16_t> output({8, 8, 8, 8, 6, 6, 6, 6,
2027 4, 4, 4, 4, 5, 5, 5, 5});
2028
2029
2030 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2031 memoryManager,
2032 shape, input0, 1.0f, 0,
2033 shape, input1, 1.0f, 0,
2034 shape, output, 0.25f, 0);
2035}
2036
2037LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
2038 armnn::IWorkloadFactory& workloadFactory,
2039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2040{
2041 unsigned int shape0[] = { 1, 2, 2, 2 };
2042 std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
2043
2044 unsigned int shape1[] = { 1, 1, 1, 1 };
2045 std::vector<int16_t> input1({ 2 });
2046
2047 std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
2048
2049 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2050 memoryManager,
2051 shape0, input0, 1.0f, 0,
2052 shape1, input1, 1.0f, 0,
2053 shape0, output, 1.0f, 0);
2054}
2055
2056LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
2057 armnn::IWorkloadFactory& workloadFactory,
2058 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2059{
2060 unsigned int shape0[] = { 1, 3, 3, 2 };
2061 std::vector<int16_t> input0({1, 4, 3, 8, 5, 12,
2062 7, 16, 9, 20, 11, 24,
2063 13, 28, 15, 32, 17, 36});
2064
2065 unsigned int shape1[] = { 1, 1, 1, 2 };
2066 std::vector<int16_t> input1({ 1, 2 });
2067
2068 std::vector<int16_t> output({1, 2, 3, 4, 5, 6,
2069 7, 8, 9, 10, 11, 12,
2070 13, 14, 15, 16, 17, 18});
2071
2072 return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
2073 memoryManager,
2074 shape0, input0, 1.0f, 0,
2075 shape1, input1, 1.0f, 0,
2076 shape0, output, 1.0f, 0);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01002077}
2078
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002079template<typename DescriptorType>
2080std::unique_ptr<armnn::IWorkload> CreateWorkload(
2081 const armnn::IWorkloadFactory& workloadFactory,
2082 const armnn::WorkloadInfo& info,
2083 const DescriptorType& descriptor)
2084{
2085 return CreateWorkload(workloadFactory, info, descriptor);
2086};
2087
// Specialization for Maximum: forwards to the factory's dedicated
// CreateMaximum entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MaximumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMaximum(descriptor, info);
}
2096
// Specialization for Minimum: forwards to the factory's dedicated
// CreateMinimum entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MinimumQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMinimum(descriptor, info);
}
2105
// Specialization for Equal: forwards to the factory's dedicated
// CreateEqual entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::EqualQueueDescriptor& descriptor)
{
    return workloadFactory.CreateEqual(descriptor, info);
}
2114
// Specialization for Greater: forwards to the factory's dedicated
// CreateGreater entry point.
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::GreaterQueueDescriptor& descriptor)
{
    return workloadFactory.CreateGreater(descriptor, info);
}
2123
namespace {

// Generic driver for binary element-wise layer tests (Maximum, Minimum,
// Equal, Greater, ...): builds two input tensors and an expected-output
// tensor, creates and runs the workload selected by Descriptor, then returns
// actual vs expected results for the caller to compare.
//
// Template parameters:
//   Descriptor      - queue descriptor type selecting which workload to
//                     create (dispatched via the CreateWorkload
//                     specializations above).
//   ArmnnTypeInput  - armnn::DataType of both inputs.
//   ArmnnTypeOutput - armnn::DataType of the output (Boolean for the
//                     comparison layers).
//   TInput/TOutput  - C++ element types resolved from the DataTypes.
//
// qScale/qOffset are applied to all three tensors, but only when TInput is a
// quantised type; the defaults make them a no-op for float tests.
template <typename Descriptor,
          armnn::DataType ArmnnTypeInput,
          armnn::DataType ArmnnTypeOutput,
          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
LayerTestResult<TOutput, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<TInput> values0,
    const unsigned int shape1[4], std::vector<TInput> values1,
    const unsigned int outShape[4], std::vector<TOutput> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};

    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);

    // Quantisation parameters are only meaningful for quantised element
    // types; leave the TensorInfos untouched for float tests.
    if (armnn::IsQuantizedType<TInput>())
    {
        inputTensorInfo0.SetQuantizationScale(qScale);
        inputTensorInfo0.SetQuantizationOffset(qOffset);

        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);

        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<TOutput,4> ret(outputTensorInfo);

    // Boolean outputs (comparison layers) are compared as 0/1 flags rather
    // than as numeric values.
    if(ArmnnTypeOutput == armnn::DataType::Boolean)
    {
        ret.compareBoolean = true;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the descriptor and workload info, then create the concrete
    // workload via the CreateWorkload specialization for Descriptor.
    Descriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);

    // Allocate backing memory before copying data in; configure and execute
    // only after the inputs are populated.
    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
    return ret;
}

// Convenience overload for the common case where input and output share the
// same DataType: forwards with ArmnnT used for both.
template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
LayerTestResult<T, 4> ElementwiseTestHelper(
    armnn::IWorkloadFactory & workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
    const unsigned int shape0[4], std::vector<T> values0,
    const unsigned int shape1[4], std::vector<T> values1,
    const unsigned int outShape[4], std::vector<T> outValues,
    float qScale = 0.0f, int qOffset = 0)
{
    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
        (workloadFactory,
         memoryManager,
         shape0,
         values0,
         shape1,
         values1,
         outShape,
         outValues,
         qScale,
         qOffset);
}
}
2215
2216LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002218{
2219 const unsigned int width = 2;
2220 const unsigned int height = 2;
2221 const unsigned int channelCount = 2;
2222 const unsigned int batchSize = 2;
2223
2224 unsigned int shape[] = { batchSize, channelCount, height, width };
2225
2226 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2227 3, 3, 3, 3, 4, 4, 4, 4 });
2228
2229 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2230 5, 5, 5, 5, 4, 4, 4, 4 });
2231
kevmay012b4d88e2019-01-24 14:05:09 +00002232 std::vector<uint8_t> output({ 1, 1, 1, 1, 0, 0, 0, 0,
2233 0, 0, 0, 0, 1, 1, 1, 1 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002234
kevmay012b4d88e2019-01-24 14:05:09 +00002235 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002236 workloadFactory,
2237 memoryManager,
2238 shape,
2239 input0,
2240 shape,
2241 input1,
2242 shape,
2243 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002244}
2245
kevmay012b4d88e2019-01-24 14:05:09 +00002246LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002247 armnn::IWorkloadFactory& workloadFactory,
2248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2249{
2250 unsigned int shape0[] = { 1, 2, 2, 2 };
2251 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2252
2253 unsigned int shape1[] = { 1, 1, 1, 1 };
2254 std::vector<float> input1({ 1 });
2255
kevmay012b4d88e2019-01-24 14:05:09 +00002256 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002257
kevmay012b4d88e2019-01-24 14:05:09 +00002258 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002259 workloadFactory,
2260 memoryManager,
2261 shape0,
2262 input0,
2263 shape1,
2264 input1,
2265 shape0,
2266 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002267}
2268
kevmay012b4d88e2019-01-24 14:05:09 +00002269LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002270 armnn::IWorkloadFactory& workloadFactory,
2271 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2272{
2273 const unsigned int shape0[] = { 1, 2, 2, 3 };
2274 const unsigned int shape1[] = { 1, 1, 1, 3 };
2275
2276 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2277 7, 8, 9, 10, 11, 12 });
2278
2279 std::vector<float> input1({ 1, 2, 3});
2280
kevmay012b4d88e2019-01-24 14:05:09 +00002281 std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
2282 0, 0, 0, 0, 0, 0 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002283
kevmay012b4d88e2019-01-24 14:05:09 +00002284 return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002285 workloadFactory,
2286 memoryManager,
2287 shape0,
2288 input0,
2289 shape1,
2290 input1,
2291 shape0,
2292 output);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002293}
2294
2295LayerTestResult<uint8_t, 4> EqualUint8Test(
2296 armnn::IWorkloadFactory& workloadFactory,
2297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2298{
2299 unsigned int shape[] = { 2, 2, 2, 2 };
2300
2301 // See dequantized values to the right.
2302 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002303 3, 3, 3, 3, 7, 7, 7, 7 });
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002304
2305 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2306 3, 3, 3, 3, 5, 5, 5, 5 });
2307
2308 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2309 1, 1, 1, 1, 0, 0, 0, 0 });
2310
kevmay012b4d88e2019-01-24 14:05:09 +00002311 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2312 armnn::DataType::QuantisedAsymm8,
2313 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002314 workloadFactory,
2315 memoryManager,
2316 shape,
2317 input0,
2318 shape,
2319 input1,
2320 shape,
2321 output,
2322 1.0f,
2323 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002324}
2325
2326LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
2327 armnn::IWorkloadFactory& workloadFactory,
2328 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2329{
2330 const unsigned int shape0[] = { 1, 2, 2, 3 };
2331 const unsigned int shape1[] = { 1, 1, 1, 1 };
2332
2333 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2334 7, 8, 9, 10, 11, 12 });
2335
2336 std::vector<uint8_t> input1({ 1 });
2337
2338 std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
2339 0, 0, 0, 0, 0, 0 });
2340
kevmay012b4d88e2019-01-24 14:05:09 +00002341 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2342 armnn::DataType::QuantisedAsymm8,
2343 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002344 workloadFactory,
2345 memoryManager,
2346 shape0,
2347 input0,
2348 shape1,
2349 input1,
2350 shape0,
2351 output,
2352 1.0f,
2353 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002354}
2355
2356LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
2357 armnn::IWorkloadFactory& workloadFactory,
2358 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2359{
2360 const unsigned int shape0[] = { 1, 2, 2, 3 };
2361 const unsigned int shape1[] = { 1, 1, 1, 3 };
2362
2363 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2364 7, 8, 9, 10, 11, 12 });
2365
2366 std::vector<uint8_t> input1({ 1, 1, 3});
2367
2368 std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
2369 0, 0, 0, 0, 0, 0 });
2370
kevmay012b4d88e2019-01-24 14:05:09 +00002371 return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
2372 armnn::DataType::QuantisedAsymm8,
2373 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002374 workloadFactory,
2375 memoryManager,
2376 shape0,
2377 input0,
2378 shape1,
2379 input1,
2380 shape0,
2381 output,
2382 1.0f,
2383 0);
FrancisMurtagh30cdfca2018-12-18 12:57:35 +00002384}
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002385
kevmay012b4d88e2019-01-24 14:05:09 +00002386LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
FrancisMurtagh878f0232018-12-19 10:56:15 +00002387 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2388{
2389 const unsigned int width = 2;
2390 const unsigned int height = 2;
2391 const unsigned int channelCount = 2;
2392 const unsigned int batchSize = 2;
2393
2394 unsigned int shape[] = { batchSize, channelCount, height, width };
2395
2396 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2397 3, 3, 3, 3, 4, 4, 4, 4 });
2398
2399 std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
2400 5, 5, 5, 5, 4, 4, 4, 4 });
2401
kevmay012b4d88e2019-01-24 14:05:09 +00002402 std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
2403 0, 0, 0, 0, 0, 0, 0, 0 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002404
kevmay012b4d88e2019-01-24 14:05:09 +00002405 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002406 workloadFactory,
2407 memoryManager,
2408 shape,
2409 input0,
2410 shape,
2411 input1,
2412 shape,
2413 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002414}
2415
kevmay012b4d88e2019-01-24 14:05:09 +00002416LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002417 armnn::IWorkloadFactory& workloadFactory,
2418 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2419{
2420 unsigned int shape0[] = { 1, 2, 2, 2 };
2421 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2422
2423 unsigned int shape1[] = { 1, 1, 1, 1 };
2424 std::vector<float> input1({ 1 });
2425
kevmay012b4d88e2019-01-24 14:05:09 +00002426 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
FrancisMurtagh878f0232018-12-19 10:56:15 +00002427
kevmay012b4d88e2019-01-24 14:05:09 +00002428 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002429 workloadFactory,
2430 memoryManager,
2431 shape0,
2432 input0,
2433 shape1,
2434 input1,
2435 shape0,
2436 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002437}
2438
kevmay012b4d88e2019-01-24 14:05:09 +00002439LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
FrancisMurtagh878f0232018-12-19 10:56:15 +00002440 armnn::IWorkloadFactory& workloadFactory,
2441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2442{
2443 const unsigned int shape0[] = { 1, 2, 2, 3 };
2444 const unsigned int shape1[] = { 1, 1, 1, 3 };
2445
2446 std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
2447 7, 8, 9, 10, 11, 12 });
2448
2449 std::vector<float> input1({ 1, 3, 2});
2450
kevmay012b4d88e2019-01-24 14:05:09 +00002451 std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
2452 1, 1, 1, 1, 1, 1 });
FrancisMurtagh878f0232018-12-19 10:56:15 +00002453
kevmay012b4d88e2019-01-24 14:05:09 +00002454 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002455 workloadFactory,
2456 memoryManager,
2457 shape0,
2458 input0,
2459 shape1,
2460 input1,
2461 shape0,
2462 output);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002463}
2464
2465LayerTestResult<uint8_t, 4> GreaterUint8Test(
2466 armnn::IWorkloadFactory& workloadFactory,
2467 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2468{
2469 unsigned int shape[] = { 2, 2, 2, 2 };
2470
2471 // See dequantized values to the right.
2472 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2473 3, 3, 3, 3, 5, 5, 5, 5 });
2474
2475 std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
2476 2, 2, 2, 2, 5, 5, 5, 5 });
2477
2478 std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
2479 1, 1, 1, 1, 0, 0, 0, 0 });
2480
kevmay012b4d88e2019-01-24 14:05:09 +00002481 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2482 armnn::DataType::QuantisedAsymm8,
2483 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002484 workloadFactory,
2485 memoryManager,
2486 shape,
2487 input0,
2488 shape,
2489 input1,
2490 shape,
2491 output,
2492 1.0f,
2493 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002494}
2495
2496LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
2497 armnn::IWorkloadFactory& workloadFactory,
2498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2499{
2500 const unsigned int shape0[] = { 1, 2, 2, 3 };
2501 const unsigned int shape1[] = { 1, 1, 1, 1 };
2502
2503 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2504 7, 8, 9, 10, 11, 12 });
2505
2506 std::vector<uint8_t> input1({ 1 });
2507
2508 std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
2509 1, 1, 1, 1, 1, 1 });
2510
kevmay012b4d88e2019-01-24 14:05:09 +00002511 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2512 armnn::DataType::QuantisedAsymm8,
2513 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002514 workloadFactory,
2515 memoryManager,
2516 shape0,
2517 input0,
2518 shape1,
2519 input1,
2520 shape0,
2521 output,
2522 1.0f,
2523 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002524}
2525
2526LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
2527 armnn::IWorkloadFactory& workloadFactory,
2528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2529{
2530 const unsigned int shape0[] = { 1, 2, 2, 3 };
2531 const unsigned int shape1[] = { 1, 1, 1, 3 };
2532
2533 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2534 7, 8, 9, 10, 11, 12 });
2535
2536 std::vector<uint8_t> input1({ 1, 1, 3});
2537
2538 std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
2539 1, 1, 1, 1, 1, 1 });
2540
kevmay012b4d88e2019-01-24 14:05:09 +00002541 return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
2542 armnn::DataType::QuantisedAsymm8,
2543 armnn::DataType::Boolean>(
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002544 workloadFactory,
2545 memoryManager,
2546 shape0,
2547 input0,
2548 shape1,
2549 input1,
2550 shape0,
2551 output,
2552 1.0f,
2553 0);
FrancisMurtagh878f0232018-12-19 10:56:15 +00002554}
2555
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002556LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
2557 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2558{
2559 const unsigned int width = 2;
2560 const unsigned int height = 2;
2561 const unsigned int channelCount = 2;
2562 const unsigned int batchSize = 2;
2563
2564 unsigned int shape[] = { batchSize, channelCount, height, width };
2565
2566 std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
2567 3, 3, 3, 3, 4, 4, 4, 4 });
2568
2569 std::vector<float> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2570 4, 4, 4, 4, 5, 5, 5, 5 });
2571
2572 std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
2573 4, 4, 4, 4, 5, 5, 5, 5 });
2574
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002575 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2576 workloadFactory,
2577 memoryManager,
2578 shape,
2579 input0,
2580 shape,
2581 input1,
2582 shape,
2583 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002584}
2585
2586LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
2587 armnn::IWorkloadFactory& workloadFactory,
2588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2589{
2590 unsigned int shape0[] = { 1, 2, 2, 2 };
2591 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2592
2593 unsigned int shape1[] = { 1, 1, 1, 1 };
2594 std::vector<float> input1({ 2 });
2595
2596 std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
2597
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002598 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2599 workloadFactory,
2600 memoryManager,
2601 shape0,
2602 input0,
2603 shape1,
2604 input1,
2605 shape0,
2606 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002607}
2608
2609LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
2610 armnn::IWorkloadFactory& workloadFactory,
2611 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2612{
2613 const unsigned int shape0[] = { 1, 2, 2, 3 };
2614 const unsigned int shape1[] = { 1, 1, 1, 3 };
2615
2616 std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
2617 7, 8, 9, 10, 11, 12 });
2618
2619 std::vector<float> input1({ 1, 2, 3});
2620
2621 std::vector<float> output({ 1, 2, 3, 4, 5, 6,
kevmay012b4d88e2019-01-24 14:05:09 +00002622 7, 8, 9, 10, 11, 12 });
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002623
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002624 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
2625 workloadFactory,
2626 memoryManager,
2627 shape0,
2628 input0,
2629 shape1,
2630 input1,
2631 shape0,
2632 output);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002633}
2634
2635LayerTestResult<uint8_t, 4> MaximumUint8Test(
2636 armnn::IWorkloadFactory& workloadFactory,
2637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2638{
2639 unsigned int shape[] = { 2, 2, 2, 2 };
2640
2641 // See dequantized values to the right.
2642 std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2643 3, 3, 3, 3, 4, 4, 4, 4 });
2644
2645 std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2646 4, 4, 4, 4, 5, 5, 5, 5 });
2647
2648 std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2649 4, 4, 4, 4, 5, 5, 5, 5 });
2650
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002651 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2652 workloadFactory,
2653 memoryManager,
2654 shape,
2655 input0,
2656 shape,
2657 input1,
2658 shape,
2659 output,
2660 1.0f,
2661 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002662}
2663
2664LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
2665 armnn::IWorkloadFactory& workloadFactory,
2666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2667{
2668 const unsigned int shape0[] = { 1, 2, 2, 3 };
2669 const unsigned int shape1[] = { 1, 1, 1, 1 };
2670
2671 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2672 7, 8, 9, 10, 11, 12 });
2673
2674 std::vector<uint8_t> input1({2});
2675
2676 std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
2677 7, 8, 9, 10, 11, 12 });
2678
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002679 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2680 workloadFactory,
2681 memoryManager,
2682 shape0,
2683 input0,
2684 shape1,
2685 input1,
2686 shape0,
2687 output,
2688 1.0f,
2689 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002690}
2691
2692LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
2693 armnn::IWorkloadFactory& workloadFactory,
2694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2695{
2696 const unsigned int shape0[] = { 1, 2, 2, 3 };
2697 const unsigned int shape1[] = { 1, 1, 1, 3 };
2698
2699 std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
2700 7, 8, 9, 10, 11, 12 });
2701
2702 std::vector<uint8_t> input1({ 1, 10, 3});
2703
2704 std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
2705 7, 10, 9, 10, 11, 12 });
2706
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002707 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2708 workloadFactory,
2709 memoryManager,
2710 shape0,
2711 input0,
2712 shape1,
2713 input1,
2714 shape0,
2715 output,
2716 1.0f,
2717 0);
Éanna Ó Catháinde705582018-12-03 13:04:22 +00002718}
2719
Sadik Armagan2999a022019-04-09 14:20:12 +01002720LayerTestResult<int16_t, 4> MaximumInt16Test(
2721 armnn::IWorkloadFactory& workloadFactory,
2722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2723{
2724 unsigned int shape[] = { 2, 2, 2, 2 };
2725
2726 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2727 3, 3, 3, 3, 4, 4, 4, 4 });
2728
2729 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2730 4, 4, 4, 4, 5, 5, 5, 5 });
2731
2732 std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
2733 4, 4, 4, 4, 5, 5, 5, 5 });
2734
2735 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2736 workloadFactory,
2737 memoryManager,
2738 shape,
2739 input0,
2740 shape,
2741 input1,
2742 shape,
2743 output,
2744 1.0f,
2745 0);
2746}
2747
2748LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
2749 armnn::IWorkloadFactory& workloadFactory,
2750 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2751{
2752 const unsigned int shape0[] = { 1, 2, 2, 3 };
2753 const unsigned int shape1[] = { 1, 1, 1, 1 };
2754
2755 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2756 7, 8, 9, 10, 11, 12 });
2757
2758 std::vector<int16_t> input1({2});
2759
2760 std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
2761 7, 8, 9, 10, 11, 12 });
2762
2763 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2764 workloadFactory,
2765 memoryManager,
2766 shape0,
2767 input0,
2768 shape1,
2769 input1,
2770 shape0,
2771 output,
2772 1.0f,
2773 0);
2774}
2775
2776LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
2777 armnn::IWorkloadFactory& workloadFactory,
2778 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2779{
2780 const unsigned int shape0[] = { 1, 2, 2, 3 };
2781 const unsigned int shape1[] = { 1, 1, 1, 3 };
2782
2783 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2784 7, 8, 9, 10, 11, 12 });
2785
2786 std::vector<int16_t> input1({ 1, 10, 3});
2787
2788 std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
2789 7, 10, 9, 10, 11, 12 });
2790
2791 return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2792 workloadFactory,
2793 memoryManager,
2794 shape0,
2795 input0,
2796 shape1,
2797 input1,
2798 shape0,
2799 output,
2800 1.0f,
2801 0);
2802}
2803
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002804LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
2805 armnn::IWorkloadFactory& workloadFactory,
2806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2807{
2808 unsigned int shape0[] = { 1, 2, 2, 2 };
2809 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
2810
2811 unsigned int shape1[] = { 1, 1, 1, 1 };
2812 std::vector<float> input1({ 2 });
2813
2814 std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
2815
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002816 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2817 workloadFactory,
2818 memoryManager,
2819 shape0,
2820 input0,
2821 shape1,
2822 input1,
2823 shape0,
2824 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002825}
2826
2827
2828LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
2829 armnn::IWorkloadFactory& workloadFactory,
2830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2831{
2832 unsigned int shape0[] = { 1, 2, 2, 2 };
2833 std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
2834
2835 unsigned int shape1[] = { 1, 1, 1, 1 };
2836 std::vector<float> input1({ 5 });
2837
2838 std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
2839
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002840 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
2841 workloadFactory,
2842 memoryManager,
2843 shape0,
2844 input0,
2845 shape1,
2846 input1,
2847 shape0,
2848 output);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002849}
2850
2851LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
2852 armnn::IWorkloadFactory & workloadFactory,
2853 const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
2854{
2855 const unsigned int shape0[] = { 1, 2, 2, 3 };
2856 const unsigned int shape1[] = { 1, 1, 1, 3 };
2857
2858 std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
2859 7, 1, 2, 3, 4, 5 });
2860
2861 std::vector<uint8_t> input1({ 1, 2, 3});
2862
2863 std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
2864 1, 1, 2, 1, 2, 3 });
2865
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00002866 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
2867 workloadFactory,
2868 memoryManager,
2869 shape0,
2870 input0,
2871 shape1,
2872 input1,
2873 shape0,
2874 output,
2875 1.0f,
2876 0);
Éanna Ó Catháin20e58802018-12-04 10:29:06 +00002877}
2878
Sadik Armagan2999a022019-04-09 14:20:12 +01002879LayerTestResult<int16_t, 4> MinimumInt16Test(
2880 armnn::IWorkloadFactory& workloadFactory,
2881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2882{
2883 unsigned int shape[] = { 2, 2, 2, 2 };
2884
2885 std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
2886 3, 3, 3, 3, 4, 4, 4, 4 });
2887
2888 std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
2889 4, 4, 4, 4, 5, 5, 5, 5 });
2890
2891 std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
2892 3, 3, 3, 3, 4, 4, 4, 4 });
2893
2894 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2895 workloadFactory,
2896 memoryManager,
2897 shape,
2898 input0,
2899 shape,
2900 input1,
2901 shape,
2902 output,
2903 1.0f,
2904 0);
2905}
2906
2907LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
2908 armnn::IWorkloadFactory& workloadFactory,
2909 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2910{
2911 const unsigned int shape0[] = { 1, 2, 2, 3 };
2912 const unsigned int shape1[] = { 1, 1, 1, 1 };
2913
2914 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2915 7, 8, 9, 10, 11, 12 });
2916
2917 std::vector<int16_t> input1({2});
2918
2919 std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
2920 2, 2, 2, 2, 2, 2 });
2921
2922 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2923 workloadFactory,
2924 memoryManager,
2925 shape0,
2926 input0,
2927 shape1,
2928 input1,
2929 shape0,
2930 output,
2931 1.0f,
2932 0);
2933}
2934
2935LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
2936 armnn::IWorkloadFactory& workloadFactory,
2937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2938{
2939 const unsigned int shape0[] = { 1, 2, 2, 3 };
2940 const unsigned int shape1[] = { 1, 1, 1, 3 };
2941
2942 std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
2943 7, 8, 9, 10, 11, 12 });
2944
2945 std::vector<int16_t> input1({ 1, 10, 3});
2946
2947 std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
2948 1, 8, 3, 1, 10, 3 });
2949
2950 return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
2951 workloadFactory,
2952 memoryManager,
2953 shape0,
2954 input0,
2955 shape1,
2956 input1,
2957 shape0,
2958 output,
2959 1.0f,
2960 0);
2961}
2962
namespace {
// Runs a Multiplication workload on the given factory with the two supplied
// Float32 input tensors and returns both the actual result and the expected
// values ('outValues') packaged in a LayerTestResult for comparison.
// NOTE(review): 'memoryManager' is accepted for signature uniformity with the
// other test helpers; it is not used directly in this function body.
LayerTestResult<float,4> MultiplicationTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<float> & values0,
    const unsigned int shape1[4],
    const std::vector<float> & values1,
    const unsigned int outShape[4],
    const std::vector<float> & outValues)
{
    // All tensors in this helper are rank-4 Float32.
    const size_t dimensionCount = 4;
    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};

    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Wire the tensor handles into the queue-descriptor / workload-info pair.
    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
    return ret;
}
} // anonymous namespace
3012
3013
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003014LayerTestResult<float,4> MultiplicationTest(
3015 armnn::IWorkloadFactory& workloadFactory,
3016 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003017{
3018 const unsigned int width = 2;
3019 const unsigned int height = 2;
3020 const unsigned int channelCount = 2;
3021 const unsigned int batchSize = 2;
3022
3023 unsigned int shape[] = { batchSize, channelCount, height, width };
3024
3025 std::vector<float> input0({
3026 1, 1, 1, 1, 2, 2, 2, 2,
3027 3, 3, 3, 3, 4, 4, 4, 4 });
3028
3029 std::vector<float> input1({
3030 2, 2, 2, 2, 3, 3, 3, 3,
3031 4, 4, 4, 4, 5, 5, 5, 5 });
3032
3033 std::vector<float> output({
3034 2, 2, 2, 2, 6, 6, 6, 6,
3035 12, 12, 12, 12, 20, 20, 20, 20 });
3036
3037 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003038 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003039 shape,
3040 input0,
3041 shape,
3042 input1,
3043 shape,
3044 output);
3045}
3046
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003047LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
3048 armnn::IWorkloadFactory& workloadFactory,
3049 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003050{
3051 unsigned int shape0[] = { 1, 2, 2, 2 };
3052 std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
3053
3054 unsigned int shape1[] = { 1, 1, 1, 1 };
3055 std::vector<float> input1({ 2 });
3056
3057 std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
3058
3059 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003060 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003061 shape0,
3062 input0,
3063 shape1,
3064 input1,
3065 shape0,
3066 output);
3067}
3068
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003069LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
3070 armnn::IWorkloadFactory& workloadFactory,
3071 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01003072{
3073 unsigned int shape0[] = { 1, 3, 3, 2 };
3074 std::vector<float> input0({
3075 1, 2, 3, 4, 5, 6,
3076 7, 8, 9, 10, 11, 12,
3077 13, 14, 15, 16, 17, 18});
3078
3079 unsigned int shape1[] = { 1, 1, 1, 2 };
3080 std::vector<float> input1({ 1, 2 });
3081
3082 std::vector<float> output({
3083 1, 4, 3, 8, 5, 12,
3084 7, 16, 9, 20, 11, 24,
3085 13, 28, 15, 32, 17, 36});
3086
3087 return MultiplicationTestHelper(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003088 memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +01003089 shape0,
3090 input0,
3091 shape1,
3092 input1,
3093 shape0,
3094 output);
3095}
telsoa014fcda012018-03-09 14:13:49 +00003096
// Runs the same Multiplication workload on the backend under test and on the
// reference backend with identical random inputs; returns a LayerTestResult
// whose 'output' is the backend-under-test result and 'outputExpected' is the
// reference-backend result, so the caller can compare them.
LayerTestResult<float,4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float,4> comparisonResult(outputTensorInfo);

    // Fixed seeds keep the random inputs (and therefore the test) deterministic.
    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Clone the descriptor for the reference backend, then re-point its
    // tensor handles at the reference handles.
    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive identical input data.
    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
3166
// Runs the same BatchNormalization workload on the backend under test and on
// the reference backend with identical random inputs and parameters; returns
// a LayerTestResult whose 'output' is the backend-under-test result and
// 'outputExpected' is the reference-backend result.
LayerTestResult<float,4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    // NCHW data tensor; the per-channel parameter tensors are rank 1.
    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    // Fixed seeds keep the random data deterministic across runs.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float,4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    // The batch-norm parameters are passed as constant CPU tensors; both
    // workloads share the same parameter handles.
    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    // Clone the descriptor for the reference backend, then re-point its
    // input/output handles at the reference handles.
    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    // Both workloads receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
3249
// Permutes 'inputData' (described by 'inputTensorInfo') according to
// 'mappings' by running a Permute workload on the given factory.
// On return, 'outputData' holds the permuted elements and 'inputTensorInfo'
// is updated in place to describe the permuted tensor.
template<typename T>
void PermuteTensorData(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::PermutationVector& mappings,
        armnn::TensorInfo & inputTensorInfo,
        const T * inputData,
        std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    // PostAllocationConfigure must run after Allocate() and before Execute().
    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    // Report the permuted tensor's description back through the in/out parameter.
    inputTensorInfo = outputTensorInfo;
}
3293
Jim Flynn825af452019-05-20 12:49:28 +01003294armnn::OriginsDescriptor CreateDescriptorForConcatenation(
surmeh013537c2c2018-05-18 16:31:43 +01003295 const std::vector<armnn::TensorInfo> & inputTensorInfos,
3296 unsigned int concatDim)
3297{
telsoa014fcda012018-03-09 14:13:49 +00003298 std::vector<armnn::TensorShape> shapes;
3299 shapes.reserve(inputTensorInfos.size());
3300 for (const armnn::TensorInfo& it: inputTensorInfos)
3301 {
3302 shapes.push_back(it.GetShape());
3303 }
surmeh013537c2c2018-05-18 16:31:43 +01003304
Jim Flynn825af452019-05-20 12:49:28 +01003305 return armnn::CreateDescriptorForConcatenation(shapes.begin(),
3306 shapes.end(),
3307 concatDim);
surmeh013537c2c2018-05-18 16:31:43 +01003308}
3309
//
// Concatenation is only supported for N and C dimensions for NCHW and the innermost dimension.
// In case of fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one or the innermost dimension.
//
3315
// Returns true when the requested concatenation axis is not directly
// supported, so the inputs must be permuted first (see the note above this
// function). All inputs are required to have the same rank.
bool NeedPermuteForConcat(
    const std::vector<armnn::TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    // Rank < 3 always needs a permute; for rank 3 only the axes that are
    // neither the 3rd slowest iterating one nor the innermost one do.
    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
3341
3342armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
3343{
3344 unsigned int numDims = inputShape.GetNumDimensions();
3345 if (numDims >= 3)
3346 {
3347 // Nothing to do if the inputShape has at least 3 dimensions.
3348 return inputShape;
3349 }
3350
3351 std::vector<unsigned int> newDims(size_t(3), 1u);
3352 unsigned int expandedBy = 3 - numDims;
3353 for (unsigned int i=0; i<numDims; ++i)
3354 {
3355 newDims[expandedBy+i] = inputShape[i];
3356 }
3357 return armnn::TensorShape(3u, &newDims[0]);
3358}
3359
3360void Generate3dPermuteVectorForConcat(
3361 unsigned int numDimensions,
3362 unsigned int & concatDim,
3363 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
3364{
3365 BOOST_ASSERT_MSG(numDimensions <= 3,
3366 "Only dimensions 1,2 and 3 are supported by this helper");
surmeh013537c2c2018-05-18 16:31:43 +01003367 unsigned int expandedBy = 3 - numDimensions;
3368 unsigned int expandedConcatAxis = concatDim + expandedBy;
3369
3370 if (expandedConcatAxis == 2)
3371 {
3372 concatDim = 0;
3373 armnn::PermutationVector forwardPermutation({1, 2, 0});
3374 armnn::PermutationVector reversePermutation({2, 0, 1});
3375 permutations = std::make_pair(forwardPermutation, reversePermutation);
3376 }
3377 else if (expandedConcatAxis == 1)
3378 {
3379 concatDim = 0;
3380 armnn::PermutationVector forwardPermutation({2, 0, 1});
3381 armnn::PermutationVector reversePermutation({1, 2, 0});
3382 permutations = std::make_pair(forwardPermutation, reversePermutation);
3383 }
3384 else
3385 {
3386 BOOST_ASSERT(expandedConcatAxis == 0);
3387 concatDim = 0;
3388 }
3389}
3390
3391//
3392// Permute the input tensors so we can do a supported concatenation.
3393// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
3394// at the front. Finally this function tells what the output shape
3395// of the permuted concatenated tensor is going to be.
3396//
// Permutes every input tensor (expanding <3d shapes to 3d first) so that the
// concatenation can happen along a supported axis. On return:
//  - 'inputTensorInfos'/'inputData' describe the permuted inputs,
//  - 'inputDataStorage' owns the permuted element buffers,
//  - 'permuteVector' holds the reverse permutation to undo on the output,
//  - 'concatDim' is rewritten to the post-permute axis,
//  - 'outputTensorInfo' has its shape updated to the permuted output shape.
template <typename T>
void PermuteInputsForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        std::vector<armnn::TensorInfo> & inputTensorInfos,
        std::vector<T *> & inputData,
        std::vector<std::vector<T>> & inputDataStorage,
        armnn::PermutationVector & permuteVector,
        unsigned int & concatDim,
        armnn::TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const armnn::PermutationVector identity({0, 1, 2});

    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            // First input: derive the (forward, reverse) permutation pair and
            // the rewritten concat axis from its rank.
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        armnn::TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        // Re-point the caller's view at the permuted copy of this input.
        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
3459
3460
3461//
3462// This is the pair of PermuteInputsForConcat(...) which permutes back
telsoa01c577f2c2018-08-31 09:22:23 +01003463// the output of the concatenation so we can check it against an expected
surmeh013537c2c2018-05-18 16:31:43 +01003464// output.
3465//
// Copies the concatenated result out of 'inputDataHandle', applies the
// reverse permutation 'permuteVector', and writes the re-ordered elements
// into the caller-provided buffer 'data'.
template <typename T>
void PermuteOutputForConcat(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const armnn::TensorInfo & tensorInfo,
        const armnn::PermutationVector & permuteVector,
        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
        T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Working copies: PermuteTensorData mutates the tensor info in place.
    armnn::TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}
3499
3500template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003501void Concatenate(
3502 armnn::IWorkloadFactory& workloadFactory,
3503 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3504 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
3505 std::initializer_list<T *> inputsOrig,
3506 const armnn::TensorInfo& outputTensorInfoOrig,
3507 T * output,
narpra015cdda352018-11-19 15:30:27 +00003508 unsigned int concatDim,
3509 bool useSubtensor)
surmeh013537c2c2018-05-18 16:31:43 +01003510{
3511 BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
3512 if (output == nullptr)
3513 {
3514 // Nullptr is an error in the test. By returning without doing the permutation
3515 // I expect the caller to fail the test. It still makes sense to report this as
3516 // an assert for Debug builds.
3517 return;
3518 }
3519
telsoa01c577f2c2018-08-31 09:22:23 +01003520 // Saves a copy of the parameters which we might need to change.
surmeh013537c2c2018-05-18 16:31:43 +01003521 std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
3522 std::vector<T *> inputs = inputsOrig;
3523 armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
3524
3525 armnn::PermutationVector permuteVector{0, 1, 2};
3526
telsoa01c577f2c2018-08-31 09:22:23 +01003527 // Holds and automatically releases memory for the reshaped input data.
surmeh013537c2c2018-05-18 16:31:43 +01003528 std::vector<std::vector<T>> tmpInputDataStorage;
3529
3530 const size_t inputCount = inputTensorInfos.size();
3531
3532 bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
3533
3534 if (needPermuteForConcat)
3535 {
3536 //
3537 // We need to permute the inputs, because concatenation along
telsoa01c577f2c2018-08-31 09:22:23 +01003538 // the requested axis is not supported.
surmeh013537c2c2018-05-18 16:31:43 +01003539 //
3540 PermuteInputsForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003541 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003542 inputTensorInfos,
3543 inputs,
3544 tmpInputDataStorage,
3545 permuteVector,
3546 concatDim,
3547 outputTensorInfo);
3548 }
3549
narpra015cdda352018-11-19 15:30:27 +00003550 armnn::WorkloadInfo workloadInfo;
telsoa014fcda012018-03-09 14:13:49 +00003551
3552 std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
3553 inputHandles.reserve(inputCount);
3554
narpra015cdda352018-11-19 15:30:27 +00003555 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
3556
Jim Flynne242f2d2019-05-22 14:24:13 +01003557 armnn::ConcatQueueDescriptor queueDescriptor;
Jim Flynn825af452019-05-20 12:49:28 +01003558 armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
narpra015cdda352018-11-19 15:30:27 +00003559 queueDescriptor.m_Parameters = viewsDescriptor;
3560
3561 if (useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00003562 {
narpra015cdda352018-11-19 15:30:27 +00003563 queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
3564 for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
3565 {
3566 queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
3567 viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
3568 }
telsoa014fcda012018-03-09 14:13:49 +00003569
narpra015cdda352018-11-19 15:30:27 +00003570 outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +00003571
narpra015cdda352018-11-19 15:30:27 +00003572 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
3573 for (unsigned int i = 0; i < inputCount; ++i)
3574 {
3575 const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
3576 std::unique_ptr<armnn::ITensorHandle> inputHandle =
3577 subTensorsSupported ?
3578 workloadFactory.CreateSubTensorHandle(*outputHandle,
3579 inputTensorInfo.GetShape(),
3580 queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
3581 workloadFactory.CreateTensorHandle(inputTensorInfo);
3582
3583 inputHandles.emplace_back(std::move(inputHandle));
3584 }
3585
telsoa014fcda012018-03-09 14:13:49 +00003586 }
narpra015cdda352018-11-19 15:30:27 +00003587 else
3588 {
3589 for (unsigned int i = 0; i < inputCount; ++i)
3590 {
3591 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
3592 inputHandles.emplace_back(std::move(inputHandle));
3593 }
3594 }
telsoa014fcda012018-03-09 14:13:49 +00003595
3596 for (unsigned int i = 0; i < inputCount; ++i)
3597 {
surmeh013537c2c2018-05-18 16:31:43 +01003598 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
telsoa014fcda012018-03-09 14:13:49 +00003599 }
3600
3601 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
3602
Jim Flynn4ed6c832019-05-20 11:02:46 +01003603 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
telsoa014fcda012018-03-09 14:13:49 +00003604
3605 for (auto& inputHandle : inputHandles)
3606 {
3607 inputHandle->Allocate();
3608 }
3609
3610 outputHandle->Allocate();
3611
3612 unsigned int nextInputId = 0;
3613 for (auto& inputHandle : inputHandles)
3614 {
surmeh013537c2c2018-05-18 16:31:43 +01003615 CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
3616 ++nextInputId;
telsoa014fcda012018-03-09 14:13:49 +00003617 }
3618
Derek Lambertif30f7d32019-04-09 10:25:02 +01003619 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00003620 workload->Execute();
3621
surmeh013537c2c2018-05-18 16:31:43 +01003622 if (needPermuteForConcat)
3623 {
3624 PermuteOutputForConcat<T>(workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003625 memoryManager,
surmeh013537c2c2018-05-18 16:31:43 +01003626 outputTensorInfo,
3627 permuteVector,
3628 std::move(outputHandle),
3629 output);
3630 }
3631 else
3632 {
3633 CopyDataFromITensorHandle(output, outputHandle.get());
3634 }
telsoa014fcda012018-03-09 14:13:49 +00003635}
3636
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003637template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003638LayerTestResult<T, 1> Concatenation1dTestImpl(
3639 armnn::IWorkloadFactory& workloadFactory,
3640 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3641 float qScale,
3642 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003643{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003644 armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003645
3646 auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
3647 auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
3648 auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
3649
Jim Flynncbb66aa2019-05-15 13:03:54 +01003650 armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003651
3652 LayerTestResult<T, 1> result(outputTensorInfo);
3653
3654 std::vector<T> output;
3655 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003656 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003657 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3658 { input0.data(), input1.data(), input2.data() },
3659 outputTensorInfo,
3660 output.data(),
3661 0,
3662 true);
telsoa014fcda012018-03-09 14:13:49 +00003663
3664 result.output = MakeTensor<T, 1>(outputTensorInfo, output);
3665 result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3666 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
3667 }));
3668
3669 return result;
3670}
3671
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003672LayerTestResult<float, 1> Concatenation1dTest(
3673 armnn::IWorkloadFactory& workloadFactory,
3674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003675{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003676 return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003677}
3678
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003679template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003680LayerTestResult<T, 2> Concatenation2dTestImpl(
3681 armnn::IWorkloadFactory& workloadFactory,
3682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003683 const armnn::TensorInfo& outputTensorInfo,
3684 unsigned int dimension,
3685 const float qScale,
3686 const int32_t qOffset)
3687{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003688 armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003689
3690 auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3691 // Batch 0
3692 1.0f, 2.0f, 3.0f,
3693
3694 // Batch 1
3695 10.0f, 11.0f, 12.0f,
3696 }));
3697
3698 auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3699 // Batch 0
3700 4.0f, 5.0f, 6.0f,
3701
3702 // Batch 1
3703 13.0f, 14.0f, 15.0f,
3704 }));
3705
3706 auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3707 // Batch 0
3708 7.0f, 8.0f, 9.0f,
3709
3710 // Batch 1
3711 16.0f, 17.0f, 18.0f,
3712 }));
3713
3714 LayerTestResult<T, 2> result(outputTensorInfo);
3715
3716 std::vector<T> output;
3717 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003718 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003719 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
3720 { input0.data(), input1.data(), input2.data() },
3721 outputTensorInfo,
3722 output.data(),
3723 dimension,
3724 true);
telsoa014fcda012018-03-09 14:13:49 +00003725
3726 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3727 return result;
3728}
3729
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003730template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003731LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
3732 armnn::IWorkloadFactory& workloadFactory,
3733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3734 float qScale,
3735 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003736{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003737 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003738
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003739 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3740 workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
3741
telsoa014fcda012018-03-09 14:13:49 +00003742 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3743 // Batch 0
3744 1.0f, 2.0f, 3.0f,
3745
3746 // Batch 1
3747 10.0f, 11.0f, 12.0f,
3748
3749 // Batch 2
3750 4.0f, 5.0f, 6.0f,
3751
3752 // Batch 3
3753 13.0f, 14.0f, 15.0f,
3754
3755 // Batch 4
3756 7.0f, 8.0f, 9.0f,
3757
3758 // Batch 5
3759 16.0f, 17.0f, 18.0f,
3760 }));
3761
3762 return result;
3763}
3764
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003765LayerTestResult<float, 2> Concatenation2dDim0Test(
3766 armnn::IWorkloadFactory& workloadFactory,
3767 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003768{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003769 return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003770}
3771
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003772template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003773LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
3774 armnn::IWorkloadFactory& workloadFactory,
3775 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3776 float qScale,
3777 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00003778{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003779 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003780
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003781 LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
3782 workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
3783
telsoa014fcda012018-03-09 14:13:49 +00003784 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3785 // Batch 0
3786 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3787
3788 // Batch 1
3789 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
3790 }));
3791
3792 return result;
3793}
3794
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003795LayerTestResult<float, 2> Concatenation2dDim1Test(
3796 armnn::IWorkloadFactory& workloadFactory,
3797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003798{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003799 return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003800}
3801
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003802template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003803LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
3804 armnn::IWorkloadFactory& workloadFactory,
3805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3806 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003807 int32_t qOffset)
3808{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003809 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003810 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3811 // Batch 0
3812 1.0f, 2.0f, 3.0f,
3813
3814 // Batch 1
3815 10.0f, 11.0f, 12.0f,
3816 }));
3817
Jim Flynncbb66aa2019-05-15 13:03:54 +01003818 armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003819 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3820 // Batch 0
3821 4.0f, 5.0f, 6.0f,
3822
3823 // Batch 1
3824 13.0f, 14.0f, 15.0f,
3825
3826 // Batch 0
3827 7.0f, 8.0f, 9.0f,
3828 }));
3829
Jim Flynncbb66aa2019-05-15 13:03:54 +01003830 armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003831 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3832 // Batch 1
3833 16.0f, 17.0f, 18.0f,
3834 }));
3835
Jim Flynncbb66aa2019-05-15 13:03:54 +01003836 armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003837 LayerTestResult<T, 2> result(outputTensorInfo);
3838
3839 std::vector<T> output;
3840 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003841 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003842 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3843 { input0.data(), input1.data(), input2.data() },
3844 outputTensorInfo,
3845 output.data(),
3846 0,
3847 true);
telsoa014fcda012018-03-09 14:13:49 +00003848
3849 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3850 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3851 // Batch 0
3852 1.0f, 2.0f, 3.0f,
3853
3854 // Batch 1
3855 10.0f, 11.0f, 12.0f,
3856
3857 // Batch 2
3858 4.0f, 5.0f, 6.0f,
3859
3860 // Batch 3
3861 13.0f, 14.0f, 15.0f,
3862
3863 // Batch 4
3864 7.0f, 8.0f, 9.0f,
3865
3866 // Batch 5
3867 16.0f, 17.0f, 18.0f,
3868 }));
3869
3870 return result;
3871}
3872
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003873LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
3874 armnn::IWorkloadFactory& workloadFactory,
3875 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003876{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003877 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
3878 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003879}
3880
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003881template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003882LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
3883 armnn::IWorkloadFactory& workloadFactory,
3884 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3885 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00003886 int32_t qOffset)
3887{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003888 armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003889 auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3890 // Batch 0
3891 1.0f, 2.0f, 3.0f,
3892
3893 // Batch 1
3894 10.0f, 11.0f, 12.0f,
3895 }));
3896
Jim Flynncbb66aa2019-05-15 13:03:54 +01003897 armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003898 auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3899 // Batch 0
3900 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
3901
3902 // Batch 1
3903 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
3904 }));
3905
Jim Flynncbb66aa2019-05-15 13:03:54 +01003906 armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003907 auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
3908 // Batch 0
3909 9.0f,
3910
3911 // Batch 1
3912 18.0f
3913 }));
3914
Jim Flynncbb66aa2019-05-15 13:03:54 +01003915 armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003916 LayerTestResult<T, 2> result(outputTensorInfo);
3917
3918 std::vector<T> output;
3919 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003920 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00003921 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
3922 { input0.data(), input1.data(), input2.data() },
3923 outputTensorInfo,
3924 output.data(),
3925 1,
3926 true);
telsoa014fcda012018-03-09 14:13:49 +00003927
3928 result.output = MakeTensor<T, 2>(outputTensorInfo, output);
3929 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3930 // Batch 0
3931 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
3932
3933 // Batch 1
3934 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
3935 }));
3936
3937 return result;
3938}
3939
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003940LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
3941 armnn::IWorkloadFactory& workloadFactory,
3942 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00003943{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003944 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
3945 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00003946}
3947
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00003948template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00003949LayerTestResult<T, 3> Concatenation3dTestImpl(
3950 armnn::IWorkloadFactory& workloadFactory,
3951 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
telsoa014fcda012018-03-09 14:13:49 +00003952 const armnn::TensorInfo& outputTensorInfo,
3953 unsigned int dimension,
narpra015cdda352018-11-19 15:30:27 +00003954 bool useSubtensor,
telsoa014fcda012018-03-09 14:13:49 +00003955 float qScale,
3956 int32_t qOffset)
3957{
Jim Flynncbb66aa2019-05-15 13:03:54 +01003958 armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00003959
3960 auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3961 // Batch 0, Channel 0
3962 1.0f, 2.0f,
3963
3964 // Batch 0, Channel 1
3965 3.0f, 4.0f,
3966
3967 // Batch 0, Channel 2
3968 5.0f, 6.0f,
3969
3970 // Batch 1, Channel 0
3971 19.0f, 20.0f,
3972
3973 // Batch 1, Channel 1
3974 21.0f, 22.0f,
3975
3976 // Batch 1, Channel 2
3977 23.0f, 24.0f
3978 }));
3979
3980 auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
3981 // Batch 0, Channel 0
3982 7.0f, 8.0f,
3983
3984 // Batch 0, Channel 1
3985 9.0f, 10.0f,
3986
3987 // Batch 0, Channel 2
3988 11.0f, 12.0f,
3989
3990 // Batch 1, Channel 0
3991 25.0f, 26.0f,
3992
3993 // Batch 1, Channel 1
3994 27.0f, 28.0f,
3995
3996 // Batch 1, Channel 2
3997 29.0f, 30.0f
3998 }));
3999
4000 auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4001 // Batch 0, Channel 0
4002 13.0f, 14.0f,
4003
4004 // Batch 0, Channel 1
4005 15.0f, 16.0f,
4006
4007 // Batch 0, Channel 2
4008 17.0f, 18.0f,
4009
4010 // Batch 1, Channel 0
4011 31.0f, 32.0f,
4012
4013 // Batch 1, Channel 1
4014 33.0f, 34.0f,
4015
4016 // Batch 1, Channel 2
4017 35.0f, 36.0f
4018 }));
4019
4020 LayerTestResult<T, 3> result(outputTensorInfo);
4021
4022 std::vector<T> output;
4023 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004024 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004025 { inputTensorInfo, inputTensorInfo, inputTensorInfo },
4026 { input0.data(), input1.data(), input2.data() },
4027 outputTensorInfo,
4028 output.data(),
4029 dimension,
4030 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004031
4032 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4033 return result;
4034}
4035
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004036template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004037LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
4038 armnn::IWorkloadFactory& workloadFactory,
4039 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4040 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004041 int32_t qOffset)
4042{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004043 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004044
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004045 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4046 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4047
telsoa014fcda012018-03-09 14:13:49 +00004048 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4049 // Batch 0, Channel 0
4050 1.0f, 2.0f,
4051
4052 // Batch 0, Channel 1
4053 3.0f, 4.0f,
4054
4055 // Batch 0, Channel 2
4056 5.0f, 6.0f,
4057
4058 // Batch 1, Channel 0
4059 19.0f, 20.0f,
4060
4061 // Batch 1, Channel 1
4062 21.0f, 22.0f,
4063
4064 // Batch 1, Channel 2
4065 23.0f, 24.0f,
4066
4067 // Batch 2, Channel 0
4068 7.0f, 8.0f,
4069
4070 // Batch 2, Channel 1
4071 9.0f, 10.0f,
4072
4073 // Batch 2, Channel 2
4074 11.0f, 12.0f,
4075
4076 // Batch 3, Channel 0
4077 25.0f, 26.0f,
4078
4079 // Batch 3, Channel 1
4080 27.0f, 28.0f,
4081
4082 // Batch 3, Channel 2
4083 29.0f, 30.0f,
4084
4085 // Batch 4, Channel 0
4086 13.0f, 14.0f,
4087
4088 // Batch 4, Channel 1
4089 15.0f, 16.0f,
4090
4091 // Batch 4, Channel 2
4092 17.0f, 18.0f,
4093
4094 // Batch 5, Channel 0
4095 31.0f, 32.0f,
4096
4097 // Batch 5, Channel 1
4098 33.0f, 34.0f,
4099
4100 // Batch 5, Channel 2
4101 35.0f, 36.0f
4102 }));
narpra015cdda352018-11-19 15:30:27 +00004103
telsoa014fcda012018-03-09 14:13:49 +00004104 return result;
4105}
4106
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004107LayerTestResult<float, 3> Concatenation3dDim0Test(
4108 armnn::IWorkloadFactory& workloadFactory,
4109 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004110{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004111 return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004112}
4113
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004114template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004115LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
4116 armnn::IWorkloadFactory& workloadFactory,
4117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4118 float qScale,
4119 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004120{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004121 armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004122
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004123 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4124 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004125
telsoa014fcda012018-03-09 14:13:49 +00004126 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4127 // Batch 0, Channel 0
4128 1.0f, 2.0f,
4129
4130 // Batch 0, Channel 1
4131 3.0f, 4.0f,
4132
4133 // Batch 0, Channel 2
4134 5.0f, 6.0f,
4135
4136 // Batch 0, Channel 3
4137 7.0f, 8.0f,
4138
4139 // Batch 0, Channel 4
4140 9.0f, 10.0f,
4141
4142 // Batch 0, Channel 5
4143 11.0f, 12.0f,
4144
4145 // Batch 0, Channel 6
4146 13.0f, 14.0f,
4147
4148 // Batch 0, Channel 7
4149 15.0f, 16.0f,
4150
4151 // Batch 0, Channel 8
4152 17.0f, 18.0f,
4153
4154 // Batch 1, Channel 0
4155 19.0f, 20.0f,
4156
4157 // Batch 1, Channel 1
4158 21.0f, 22.0f,
4159
4160 // Batch 1, Channel 2
4161 23.0f, 24.0f,
4162
4163 // Batch 1, Channel 3
4164 25.0f, 26.0f,
4165
4166 // Batch 1, Channel 4
4167 27.0f, 28.0f,
4168
4169 // Batch 1, Channel 5
4170 29.0f, 30.0f,
4171
4172 // Batch 1, Channel 6
4173 31.0f, 32.0f,
4174
4175 // Batch 1, Channel 7
4176 33.0f, 34.0f,
4177
4178 // Batch 1, Channel 8
4179 35.0f, 36.0f
4180 }));
4181
4182 return result;
4183}
4184
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004185LayerTestResult<float, 3> Concatenation3dDim1Test(
4186 armnn::IWorkloadFactory& workloadFactory,
4187 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004188{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004189 return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004190}
4191
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004192template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004193LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
4194 armnn::IWorkloadFactory& workloadFactory,
4195 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004196 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004197 float qScale,
4198 int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +00004199{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004200 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004201
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004202 LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
4203 workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004204
telsoa014fcda012018-03-09 14:13:49 +00004205 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4206 // Batch 0, Channel 0
4207 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
4208
4209 // Batch 0, Channel 1
4210 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
4211
4212 // Batch 0, Channel 2
4213 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
4214
4215 // Batch 1, Channel 0
4216 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
4217
4218 // Batch 1, Channel 1
4219 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
4220
4221 // Batch 1, Channel 2
4222 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
4223 }));
4224
4225 return result;
4226}
4227
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004228LayerTestResult<float, 3> Concatenation3dDim2Test(
4229 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004230 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4231 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00004232{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004233 return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
4234 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004235}
4236
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004237template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004238LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
4239 armnn::IWorkloadFactory& workloadFactory,
4240 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4241 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004242 int32_t qOffset)
4243{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004244 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004245 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4246 // Batch 0, Channel 0
4247 1.0f, 2.0f,
4248
4249 // Batch 0, Channel 1
4250 3.0f, 4.0f,
4251
4252 // Batch 0, Channel 2
4253 5.0f, 6.0f,
4254
4255 // Batch 1, Channel 0
4256 19.0f, 20.0f,
4257
4258 // Batch 1, Channel 1
4259 21.0f, 22.0f,
4260
4261 // Batch 1, Channel 2
4262 23.0f, 24.0f
4263 }));
4264
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004265 armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004266 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4267 // Batch 0, Channel 0
4268 7.0f, 8.0f,
4269
4270 // Batch 0, Channel 1
4271 9.0f, 10.0f,
4272
4273 // Batch 0, Channel 2
4274 11.0f, 12.0f,
4275 }));
4276
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004277 armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004278 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4279 // Batch 0, Channel 0
4280 25.0f, 26.0f,
4281
4282 // Batch 0, Channel 1
4283 27.0f, 28.0f,
4284
4285 // Batch 0, Channel 2
4286 29.0f, 30.0f,
4287
4288 // Batch 1, Channel 0
4289 13.0f, 14.0f,
4290
4291 // Batch 1, Channel 1
4292 15.0f, 16.0f,
4293
4294 // Batch 1, Channel 2
4295 17.0f, 18.0f,
4296
4297 // Batch 2, Channel 0
4298 31.0f, 32.0f,
4299
4300 // Batch 2, Channel 1
4301 33.0f, 34.0f,
4302
4303 // Batch 2, Channel 2
4304 35.0f, 36.0f
4305 }));
4306
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004307 armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00004308 LayerTestResult<T, 3> result(outputTensorInfo);
4309
4310 std::vector<T> output;
4311 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004312 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004313 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4314 { input0.data(), input1.data(), input2.data() },
4315 outputTensorInfo,
4316 output.data(),
4317 0,
4318 true);
telsoa014fcda012018-03-09 14:13:49 +00004319
4320 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4321 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4322 // Batch 0, Channel 0
4323 1.0f, 2.0f,
4324
4325 // Batch 0, Channel 1
4326 3.0f, 4.0f,
4327
4328 // Batch 0, Channel 2
4329 5.0f, 6.0f,
4330
4331 // Batch 1, Channel 0
4332 19.0f, 20.0f,
4333
4334 // Batch 1, Channel 1
4335 21.0f, 22.0f,
4336
4337 // Batch 1, Channel 2
4338 23.0f, 24.0f,
4339
4340 // Batch 2, Channel 0
4341 7.0f, 8.0f,
4342
4343 // Batch 2, Channel 1
4344 9.0f, 10.0f,
4345
4346 // Batch 2, Channel 2
4347 11.0f, 12.0f,
4348
4349 // Batch 3, Channel 0
4350 25.0f, 26.0f,
4351
4352 // Batch 3, Channel 1
4353 27.0f, 28.0f,
4354
4355 // Batch 3, Channel 2
4356 29.0f, 30.0f,
4357
4358 // Batch 4, Channel 0
4359 13.0f, 14.0f,
4360
4361 // Batch 4, Channel 1
4362 15.0f, 16.0f,
4363
4364 // Batch 4, Channel 2
4365 17.0f, 18.0f,
4366
4367 // Batch 5, Channel 0
4368 31.0f, 32.0f,
4369
4370 // Batch 5, Channel 1
4371 33.0f, 34.0f,
4372
4373 // Batch 5, Channel 2
4374 35.0f, 36.0f
4375 }));
4376
4377 return result;
4378}
4379
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004380LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
4381 armnn::IWorkloadFactory& workloadFactory,
4382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004383{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004384 return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
4385 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004386}
4387
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004388template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004389LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
4390 armnn::IWorkloadFactory& workloadFactory,
4391 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4392 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004393 int32_t qOffset)
4394{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004395 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004396 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4397 // Batch 0, Channel 0
4398 1.0f, 2.0f,
4399
4400 // Batch 0, Channel 1
4401 3.0f, 4.0f,
4402
4403 // Batch 0, Channel 2
4404 5.0f, 6.0f,
4405
4406 // Batch 1, Channel 0
4407 19.0f, 20.0f,
4408
4409 // Batch 1, Channel 1
4410 21.0f, 22.0f,
4411
4412 // Batch 1, Channel 2
4413 23.0f, 24.0f
4414 }));
4415
Jim Flynncbb66aa2019-05-15 13:03:54 +01004416 armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004417 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4418 // Batch 0, Channel 0
4419 7.0f, 8.0f,
4420
4421 // Batch 0, Channel 1
4422 9.0f, 10.0f,
4423
4424 // Batch 0, Channel 2
4425 11.0f, 12.0f,
4426
4427 // Batch 0, Channel 3
4428 25.0f, 26.0f,
4429
4430 // Batch 1, Channel 0
4431 27.0f, 28.0f,
4432
4433 // Batch 1, Channel 1
4434 29.0f, 30.0f,
4435
4436 // Batch 1, Channel 2
4437 13.0f, 14.0f,
4438
4439 // Batch 1, Channel 3
4440 15.0f, 16.0f,
4441 }));
4442
Jim Flynncbb66aa2019-05-15 13:03:54 +01004443 armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004444 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4445 // Batch 0, Channel 0
4446 17.0f, 18.0f,
4447
4448 // Batch 1, Channel 0
4449 31.0f, 32.0f,
4450 }));
4451
Jim Flynncbb66aa2019-05-15 13:03:54 +01004452 armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004453 LayerTestResult<T, 3> result(outputTensorInfo);
4454
4455 std::vector<T> output;
4456 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004457 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004458 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4459 { input0.data(), input1.data(), input2.data() },
4460 outputTensorInfo,
4461 output.data(),
4462 1,
4463 true);
telsoa014fcda012018-03-09 14:13:49 +00004464
4465 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4466 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4467 // Batch 0, Channel 0
4468 1.0f, 2.0f,
4469
4470 // Batch 0, Channel 1
4471 3.0f, 4.0f,
4472
4473 // Batch 0, Channel 2
4474 5.0f, 6.0f,
4475
4476 // Batch 0, Channel 3
4477 7.0f, 8.0f,
4478
4479 // Batch 0, Channel 4
4480 9.0f, 10.0f,
4481
4482 // Batch 0, Channel 5
4483 11.0f, 12.0f,
4484
4485 // Batch 0, Channel 6
4486 25.0f, 26.0f,
4487
4488 // Batch 0, Channel 7
4489 17.0f, 18.0f,
4490
4491 // Batch 1, Channel 0
4492 19.0f, 20.0f,
4493
4494 // Batch 1, Channel 1
4495 21.0f, 22.0f,
4496
4497 // Batch 1, Channel 2
4498 23.0f, 24.0f,
4499
4500 // Batch 1, Channel 3
4501 27.0f, 28.0f,
4502
4503 // Batch 1, Channel 4
4504 29.0f, 30.0f,
4505
4506 // Batch 1, Channel 5
4507 13.0f, 14.0f,
4508
4509 // Batch 1, Channel 6
4510 15.0f, 16.0f,
4511
4512 // Batch 1, Channel 7
4513 31.0f, 32.0f,
4514 }));
4515
4516 return result;
4517}
4518
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004519LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
4520 armnn::IWorkloadFactory& workloadFactory,
4521 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004522{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004523 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
4524 workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00004525}
4526
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004527template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004528LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
4529 armnn::IWorkloadFactory& workloadFactory,
4530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004531 bool useSubtensor,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004532 float qScale,
telsoa014fcda012018-03-09 14:13:49 +00004533 int32_t qOffset)
4534{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004535 armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004536 auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4537 // Batch 0, Channel 0
4538 1.0f, 2.0f,
4539
4540 // Batch 0, Channel 1
4541 3.0f, 4.0f,
4542
4543 // Batch 0, Channel 2
4544 5.0f, 6.0f,
4545
4546 // Batch 1, Channel 0
4547 19.0f, 20.0f,
4548
4549 // Batch 1, Channel 1
4550 21.0f, 22.0f,
4551
4552 // Batch 1, Channel 2
4553 23.0f, 24.0f
4554 }));
4555
Jim Flynncbb66aa2019-05-15 13:03:54 +01004556 armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004557 auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4558 // Batch 0, Channel 0
4559 7.0f,
4560
4561 // Batch 0, Channel 1
4562 9.0f,
4563
4564 // Batch 0, Channel 2
4565 11.0f,
4566
4567 // Batch 1, Channel 0
4568 25.0f,
4569
4570 // Batch 1, Channel 1
4571 27.0f,
4572
4573 // Batch 1, Channel 2
4574 29.0f
4575 }));
4576
Jim Flynncbb66aa2019-05-15 13:03:54 +01004577 armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004578 auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
4579 // Batch 0, Channel 0
4580 13.0f, 14.0f, 50.0f,
4581
4582 // Batch 0, Channel 1
4583 15.0f, 16.0f, 51.0f,
4584
4585 // Batch 0, Channel 2
4586 17.0f, 18.0f, 52.0f,
4587
4588 // Batch 1, Channel 0
4589 31.0f, 32.0f, 53.0f,
4590
4591 // Batch 1, Channel 1
4592 33.0f, 34.0f, 54.0f,
4593
4594 // Batch 1, Channel 2
4595 35.0f, 36.0f, 55.0f,
4596 }));
4597
Jim Flynncbb66aa2019-05-15 13:03:54 +01004598 armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
telsoa014fcda012018-03-09 14:13:49 +00004599 LayerTestResult<T, 3> result(outputTensorInfo);
4600
4601 std::vector<T> output;
4602 output.resize(outputTensorInfo.GetNumElements());
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004603 Concatenate<T>(workloadFactory, memoryManager,
narpra015cdda352018-11-19 15:30:27 +00004604 { input0TensorInfo, input1TensorInfo, input2TensorInfo },
4605 { input0.data(), input1.data(), input2.data() },
4606 outputTensorInfo,
4607 output.data(),
4608 2,
4609 useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00004610
4611 result.output = MakeTensor<T, 3>(outputTensorInfo, output);
4612 result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4613 // Batch 0, Channel 0
4614 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
4615
4616 // Batch 0, Channel 1
4617 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
4618
4619 // Batch 0, Channel 2
4620 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
4621
4622 // Batch 1, Channel 0
4623 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
4624
4625 // Batch 1, Channel 1
4626 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
4627
4628 // Batch 1, Channel 2
4629 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
4630 }));
4631
4632 return result;
4633}
4634
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004635LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
4636 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00004637 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4638 bool useSubtensor)
4639{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004640 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
4641 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004642}
4643
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004644template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004645LayerTestResult<T, 4> Concatenation4dTestImpl(
4646 armnn::IWorkloadFactory& workloadFactory,
4647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4648 const armnn::TensorInfo& outputTensorInfo,
4649 unsigned int dimension,
4650 bool useSubtensor,
4651 float qScale,
4652 int32_t qOffset)
4653{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004654 armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004655
4656 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4657 1.0f, 2.0f,
4658 3.0f, 4.0f,
4659 5.0f, 6.0f,
4660 7.0f, 8.0f,
4661 9.0f, 10.0f,
4662 11.0f, 12.0f
4663 }));
4664
4665 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4666 11.0f, 12.0f,
4667 13.0f, 14.0f,
4668 15.0f, 16.0f,
4669 17.0f, 18.0f,
4670 19.0f, 20.0f,
4671 21.0f, 22.0f
4672 }));
4673
4674 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4675 21.0f, 22.0f,
4676 23.0f, 24.0f,
4677 25.0f, 26.0f,
4678 27.0f, 28.0f,
4679 29.0f, 30.0f,
4680 31.0f, 32.0f
4681 }));
4682
4683 LayerTestResult<T, 4> result(outputTensorInfo);
4684
4685 std::vector<T> output;
4686 output.resize(outputTensorInfo.GetNumElements());
4687
4688 Concatenate<T>(workloadFactory,
4689 memoryManager,
4690 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
4691 {input0.data(), input1.data(), input2.data()},
4692 outputTensorInfo,
4693 output.data(),
4694 dimension,
4695 useSubtensor);
4696
4697 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4698 return result;
4699}
4700
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004701template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004702LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
4703 armnn::IWorkloadFactory& workloadFactory,
4704 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4705 float qScale,
4706 int32_t qOffset)
4707{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004708 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004709
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004710 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4711 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
4712
narpra015cdda352018-11-19 15:30:27 +00004713 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4714 1.0f, 2.0f,
4715 3.0f, 4.0f,
4716 5.0f, 6.0f,
4717 7.0f, 8.0f,
4718 9.0f, 10.0f,
4719 11.0f, 12.0f,
4720
4721 11.0f, 12.0f,
4722 13.0f, 14.0f,
4723 15.0f, 16.0f,
4724 17.0f, 18.0f,
4725 19.0f, 20.0f,
4726 21.0f, 22.0f,
4727
4728 21.0f, 22.0f,
4729 23.0f, 24.0f,
4730 25.0f, 26.0f,
4731 27.0f, 28.0f,
4732 29.0f, 30.0f,
4733 31.0f, 32.0f
4734 }));
4735 return result;
4736}
4737
4738LayerTestResult<float, 4> Concatenation4dDim0Test(
4739 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00004740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00004741{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004742 return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004743}
4744
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004745template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004746LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
4747 armnn::IWorkloadFactory& workloadFactory,
4748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4749 float qScale,
4750 int32_t qOffset)
4751{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004752 armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004753
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004754 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4755 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
4756
narpra015cdda352018-11-19 15:30:27 +00004757 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4758 1.0f, 2.0f,
4759 3.0f, 4.0f,
4760 5.0f, 6.0f,
4761 7.0f, 8.0f,
4762 9.0f, 10.0f,
4763 11.0f, 12.0f,
4764
4765 11.0f, 12.0f,
4766 13.0f, 14.0f,
4767 15.0f, 16.0f,
4768 17.0f, 18.0f,
4769 19.0f, 20.0f,
4770 21.0f, 22.0f,
4771
4772 21.0f, 22.0f,
4773 23.0f, 24.0f,
4774 25.0f, 26.0f,
4775 27.0f, 28.0f,
4776 29.0f, 30.0f,
4777 31.0f, 32.0f
4778 }));
4779
4780 return result;
4781}
4782
4783LayerTestResult<float, 4> Concatenation4dDim1Test(
4784 armnn::IWorkloadFactory& workloadFactory,
4785 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4786{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004787 return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004788}
4789
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004790template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004791LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
4792 armnn::IWorkloadFactory& workloadFactory,
4793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4794 float qScale,
4795 int32_t qOffset)
4796{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004797 armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004798
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004799 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4800 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
4801
narpra015cdda352018-11-19 15:30:27 +00004802 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4803 1.0f, 2.0f,
4804 3.0f, 4.0f,
4805 11.0f, 12.0f,
4806 13.0f, 14.0f,
4807 21.0f, 22.0f,
4808 23.0f, 24.0f,
4809
4810 5.0f, 6.0f,
4811 7.0f, 8.0f,
4812 15.0f, 16.0f,
4813 17.0f, 18.0f,
4814 25.0f, 26.0f,
4815 27.0f, 28.0f,
4816
4817 9.0f, 10.0f,
4818 11.0f, 12.0f,
4819 19.0f, 20.0f,
4820 21.0f, 22.0f,
4821 29.0f, 30.0f,
4822 31.0f, 32.0f
4823 }));
4824
4825 return result;
4826}
4827
4828LayerTestResult<float, 4> Concatenation4dDim2Test(
4829 armnn::IWorkloadFactory& workloadFactory,
4830 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4831{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004832 return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004833}
4834
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004835template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004836LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
4837 armnn::IWorkloadFactory& workloadFactory,
4838 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4839 float qScale,
4840 int32_t qOffset,
4841 bool useSubtensor)
4842{
Jim Flynncbb66aa2019-05-15 13:03:54 +01004843 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004844
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004845 LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
4846 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
4847
narpra015cdda352018-11-19 15:30:27 +00004848 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4849 1.0f, 2.0f,
4850 11.0f, 12.0f,
4851 21.0f, 22.0f,
4852 3.0f, 4.0f,
4853 13.0f, 14.0f,
4854 23.0f, 24.0f,
4855
4856 5.0f, 6.0f,
4857 15.0f, 16.0f,
4858 25.0f, 26.0f,
4859 7.0f, 8.0f,
4860 17.0f, 18.0f,
4861 27.0f, 28.0f,
4862
4863 9.0f, 10.0f,
4864 19.0f, 20.0f,
4865 29.0f, 30.0f,
4866 11.0f, 12.0f,
4867 21.0f, 22.0f,
4868 31.0f, 32.0f
4869 }));
4870
4871 return result;
4872}
4873
4874LayerTestResult<float, 4> Concatenation4dDim3Test(
4875 armnn::IWorkloadFactory& workloadFactory,
4876 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4877 bool useSubtensor)
4878{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004879 return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
4880 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00004881}
4882
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004883template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004884LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
4885 armnn::IWorkloadFactory& workloadFactory,
4886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4887 float qScale,
4888 int32_t qOffset)
4889{
4890 unsigned int dimension = 0;
Jim Flynncbb66aa2019-05-15 13:03:54 +01004891 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004892
4893 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4894 1.0f, 2.0f,
4895 3.0f, 4.0f,
4896 5.0f, 6.0f,
4897 7.0f, 8.0f,
4898 9.0f, 10.0f,
4899 11.0f, 12.0f
4900 }));
4901
Jim Flynncbb66aa2019-05-15 13:03:54 +01004902 armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004903
4904 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4905 11.0f, 12.0f,
4906 13.0f, 14.0f,
4907 15.0f, 16.0f,
4908 17.0f, 18.0f,
4909 19.0f, 20.0f,
4910 21.0f, 22.0f,
4911
4912 21.0f, 22.0f,
4913 23.0f, 24.0f,
4914 25.0f, 26.0f,
4915 27.0f, 28.0f,
4916 29.0f, 30.0f,
4917 31.0f, 32.0f
4918
4919 }));
4920
Jim Flynncbb66aa2019-05-15 13:03:54 +01004921 armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004922
4923 LayerTestResult<T, 4> result(outputTensorInfo);
4924
4925 std::vector<T> output;
4926 output.resize(outputTensorInfo.GetNumElements());
4927 Concatenate<T>(workloadFactory,
4928 memoryManager,
4929 {inputTensorInfo0, inputTensorInfo1},
4930 {input0.data(), input1.data()},
4931 outputTensorInfo,
4932 output.data(),
4933 dimension,
4934 true);
4935
4936 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
4937 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
4938 1.0f, 2.0f,
4939 3.0f, 4.0f,
4940 5.0f, 6.0f,
4941 7.0f, 8.0f,
4942 9.0f, 10.0f,
4943 11.0f, 12.0f,
4944
4945 11.0f, 12.0f,
4946 13.0f, 14.0f,
4947 15.0f, 16.0f,
4948 17.0f, 18.0f,
4949 19.0f, 20.0f,
4950 21.0f, 22.0f,
4951
4952 21.0f, 22.0f,
4953 23.0f, 24.0f,
4954 25.0f, 26.0f,
4955 27.0f, 28.0f,
4956 29.0f, 30.0f,
4957 31.0f, 32.0f
4958 }));
4959
4960 return result;
4961}
4962
4963LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
4964 armnn::IWorkloadFactory& workloadFactory,
4965 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
4966{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004967 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
4968 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00004969}
4970
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00004971template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00004972LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
4973 armnn::IWorkloadFactory& workloadFactory,
4974 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4975 float qScale,
4976 int32_t qOffset)
4977{
4978 unsigned int dimension = 1;
Jim Flynncbb66aa2019-05-15 13:03:54 +01004979 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004980
4981 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
4982 1.0f, 2.0f,
4983 3.0f, 4.0f,
4984 5.0f, 6.0f,
4985 7.0f, 8.0f,
4986 9.0f, 10.0f,
4987 11.0f, 12.0f
4988 }));
4989
Jim Flynncbb66aa2019-05-15 13:03:54 +01004990 armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00004991
4992 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
4993 11.0f, 12.0f,
4994 13.0f, 14.0f,
4995 15.0f, 16.0f,
4996 17.0f, 18.0f,
4997
4998 }));
4999
Jim Flynncbb66aa2019-05-15 13:03:54 +01005000 armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005001
5002 LayerTestResult<T, 4> result(outputTensorInfo);
5003
5004 std::vector<T> output;
5005 output.resize(outputTensorInfo.GetNumElements());
5006 Concatenate<T>(workloadFactory,
5007 memoryManager,
5008 {inputTensorInfo0, inputTensorInfo1},
5009 {input0.data(), input1.data()},
5010 outputTensorInfo,
5011 output.data(),
5012 dimension,
5013 true);
5014
5015 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5016 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5017 1.0f, 2.0f,
5018 3.0f, 4.0f,
5019 5.0f, 6.0f,
5020 7.0f, 8.0f,
5021 9.0f, 10.0f,
5022 11.0f, 12.0f,
5023 11.0f, 12.0f,
5024 13.0f, 14.0f,
5025 15.0f, 16.0f,
5026 17.0f, 18.0f
5027 }));
5028
5029 return result;
5030}
5031
5032LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
5033 armnn::IWorkloadFactory& workloadFactory,
5034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5035{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005036 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
5037 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005038}
5039
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005040template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005041LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
5042 armnn::IWorkloadFactory& workloadFactory,
5043 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5044 float qScale,
5045 int32_t qOffset)
5046{
5047 unsigned int dimension = 2;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005048 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005049
5050 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5051 1.0f, 2.0f,
5052 3.0f, 4.0f,
5053 5.0f, 6.0f,
5054 7.0f, 8.0f,
5055 9.0f, 10.0f,
5056 11.0f, 12.0f
5057 }));
5058
Jim Flynncbb66aa2019-05-15 13:03:54 +01005059 armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005060
5061 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5062 11.0f, 12.0f,
5063 13.0f, 14.0f,
5064 15.0f, 16.0f,
5065 17.0f, 18.0f,
5066 19.0f, 20.0f,
5067 21.0f, 22.0f,
5068 23.0f, 24.0f,
5069 25.0f, 26.0f,
5070 27.0f, 28.0f
5071 }));
5072
Jim Flynncbb66aa2019-05-15 13:03:54 +01005073 armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005074
5075 LayerTestResult<T, 4> result(outputTensorInfo);
5076
5077 std::vector<T> output;
5078 output.resize(outputTensorInfo.GetNumElements());
5079 Concatenate<T>(workloadFactory,
5080 memoryManager,
5081 {inputTensorInfo0, inputTensorInfo1},
5082 {input0.data(), input1.data()},
5083 outputTensorInfo,
5084 output.data(),
5085 dimension,
5086 true);
5087
5088 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5089 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5090 1.0f, 2.0f,
5091 3.0f, 4.0f,
5092 11.0f, 12.0f,
5093 13.0f, 14.0f,
5094 15.0f, 16.0f,
5095
5096 5.0f, 6.0f,
5097 7.0f, 8.0f,
5098 17.0f, 18.0f,
5099 19.0f, 20.0f,
5100 21.0f, 22.0f,
5101
5102 9.0f, 10.0f,
5103 11.0f, 12.0f,
5104 23.0f, 24.0f,
5105 25.0f, 26.0f,
5106 27.0f, 28.0f
5107 }));
5108
5109 return result;
5110}
5111
5112LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
5113 armnn::IWorkloadFactory& workloadFactory,
5114 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
5115{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005116 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
5117 workloadFactory, memoryManager, 0.0f, 0);
narpra015cdda352018-11-19 15:30:27 +00005118}
5119
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005120template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
narpra015cdda352018-11-19 15:30:27 +00005121LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
5122 armnn::IWorkloadFactory& workloadFactory,
5123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5124 float qScale,
5125 int32_t qOffset,
5126 bool useSubtensor)
5127{
5128 unsigned int dimension = 3;
Jim Flynncbb66aa2019-05-15 13:03:54 +01005129 armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005130
5131 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
5132 1.0f, 2.0f,
5133 3.0f, 4.0f,
5134 5.0f, 6.0f,
5135 7.0f, 8.0f,
5136 9.0f, 10.0f,
5137 11.0f, 12.0f
5138 }));
5139
Jim Flynncbb66aa2019-05-15 13:03:54 +01005140 armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005141
5142 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
5143 11.0f, 12.0f, 13.0f,
5144 14.0f, 15.0f, 16.0f,
5145
5146 17.0f, 18.0f, 19.0f,
5147 20.0f, 21.0f, 22.0f,
5148
5149 23.0f, 24.0f, 25.0f,
5150 26.0f, 27.0f, 28.0f
5151 }));
5152
Jim Flynncbb66aa2019-05-15 13:03:54 +01005153 armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
narpra015cdda352018-11-19 15:30:27 +00005154
5155 LayerTestResult<T, 4> result(outputTensorInfo);
5156
5157 std::vector<T> output;
5158 output.resize(outputTensorInfo.GetNumElements());
5159 Concatenate<T>(workloadFactory,
5160 memoryManager,
5161 {inputTensorInfo0, inputTensorInfo1},
5162 {input0.data(), input1.data()},
5163 outputTensorInfo,
5164 output.data(),
5165 dimension,
5166 useSubtensor);
5167
5168 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
5169 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
5170 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
5171 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
5172 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
5173 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
5174 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
5175 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
5176 }));
5177
5178 return result;
5179}
5180
5181LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
5182 armnn::IWorkloadFactory& workloadFactory,
5183 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5184 bool useSubtensor)
5185{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005186 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
5187 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00005188}
5189
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005190LayerTestResult<float, 4> ResizeBilinearNopTest(
5191 armnn::IWorkloadFactory& workloadFactory,
5192 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005193 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005194{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005195 const armnn::TensorInfo inputTensorInfo =
5196 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5197
5198 const armnn::TensorInfo outputTensorInfo =
5199 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005200
James Conroy6b965822018-11-01 11:33:09 +00005201 std::vector<float> inputData({
5202 1.0f, 2.0f, 3.0f, 4.0f,
5203 2.0f, 3.0f, 4.0f, 5.0f,
5204 3.0f, 4.0f, 5.0f, 6.0f,
5205 4.0f, 5.0f, 6.0f, 7.0f,
5206
telsoa014fcda012018-03-09 14:13:49 +00005207 1.0f, 2.0f, 3.0f, 4.0f,
5208 2.0f, 3.0f, 4.0f, 5.0f,
5209 3.0f, 4.0f, 5.0f, 6.0f,
5210 4.0f, 5.0f, 6.0f, 7.0f
James Conroy6b965822018-11-01 11:33:09 +00005211 });
5212
5213 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005214 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005215 {
5216 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005217 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005218 inputData = tmp;
5219 }
5220
5221 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005222
5223 LayerTestResult<float, 4> result(outputTensorInfo);
5224 result.outputExpected = input;
5225
5226 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5227 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5228
5229 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005230 descriptor.m_Parameters.m_DataLayout = dataLayout;
5231 armnn::WorkloadInfo info;
5232 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5233 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5234
5235 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5236
5237 inputHandle->Allocate();
5238 outputHandle->Allocate();
5239 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5240
Derek Lambertif30f7d32019-04-09 10:25:02 +01005241 workload->PostAllocationConfigure();
James Conroy074f3712018-10-03 09:32:03 +01005242 workload->Execute();
5243
5244 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5245 return result;
5246}
5247
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005248LayerTestResult<float, 4> SimpleResizeBilinearTest(
5249 armnn::IWorkloadFactory& workloadFactory,
5250 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005251 const armnn::DataLayout dataLayout)
James Conroy074f3712018-10-03 09:32:03 +01005252{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005253 const armnn::TensorInfo inputTensorInfo =
5254 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
5255
5256 const armnn::TensorInfo outputTensorInfo =
5257 armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
James Conroy074f3712018-10-03 09:32:03 +01005258
James Conroy6b965822018-11-01 11:33:09 +00005259 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005260 1.0f, 255.0f,
James Conroy6b965822018-11-01 11:33:09 +00005261 200.0f, 250.0f,
5262
5263 250.0f, 200.0f,
5264 250.0f, 1.0f
5265 });
James Conroy074f3712018-10-03 09:32:03 +01005266
5267 // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
5268 // then figures out the interpolants and weights. Note this is different to projecting the centre of the
James Conroy6b965822018-11-01 11:33:09 +00005269 // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
5270 // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
5271 // which we would expect if projecting the centre).
5272
5273 std::vector<float> outputData({
5274 1.0f,
5275
5276 250.0f
5277 });
5278
5279 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005280 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005281 {
5282 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005283 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005284 inputData = tmp;
5285
5286 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005287 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005288 outputData = tmp1;
5289 }
5290
5291 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
5292
James Conroy074f3712018-10-03 09:32:03 +01005293 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005294 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
James Conroy074f3712018-10-03 09:32:03 +01005295
5296 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5297 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5298
5299 armnn::ResizeBilinearQueueDescriptor descriptor;
5300 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005301 armnn::WorkloadInfo info;
5302 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5303 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5304
5305 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5306
5307 inputHandle->Allocate();
5308 outputHandle->Allocate();
5309 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5310
Derek Lambertif30f7d32019-04-09 10:25:02 +01005311 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005312 workload->Execute();
5313
5314 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5315 return result;
5316}
5317
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005318LayerTestResult<float, 4> ResizeBilinearSqMinTest(
5319 armnn::IWorkloadFactory& workloadFactory,
5320 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00005321 const armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +00005322{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005323 const armnn::TensorInfo inputTensorInfo =
5324 armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
5325
5326 const armnn::TensorInfo outputTensorInfo =
5327 armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
telsoa014fcda012018-03-09 14:13:49 +00005328
James Conroy6b965822018-11-01 11:33:09 +00005329 std::vector<float> inputData({
James Conroy074f3712018-10-03 09:32:03 +01005330 1.0f, 2.0f, 3.0f, 4.0f,
5331 2.0f, 3.0f, 4.0f, 5.0f,
5332 3.0f, 4.0f, 5.0f, 6.0f,
James Conroy6b965822018-11-01 11:33:09 +00005333 4.0f, 5.0f, 6.0f, 7.0f,
5334
5335 7.0f, 6.0f, 5.0f, 4.0f,
5336 6.0f, 5.0f, 4.0f, 3.0f,
5337 5.0f, 4.0f, 3.0f, 2.0f,
5338 4.0f, 3.0f, 2.0f, 1.0f
5339 });
5340
5341 std::vector<float> outputData({
5342 1.0f, 3.0f,
5343 3.0f, 5.0f,
5344
5345 7.0f, 5.0f,
5346 5.0f, 3.0f
5347 });
5348
5349 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +00005350 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy6b965822018-11-01 11:33:09 +00005351 {
5352 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005353 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005354 inputData = tmp;
5355
5356 std::vector<float> tmp1(outputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005357 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
James Conroy6b965822018-11-01 11:33:09 +00005358 outputData = tmp1;
5359 }
5360
5361 auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
telsoa014fcda012018-03-09 14:13:49 +00005362
telsoa014fcda012018-03-09 14:13:49 +00005363 LayerTestResult<float, 4> result(outputTensorInfo);
James Conroy6b965822018-11-01 11:33:09 +00005364 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
telsoa014fcda012018-03-09 14:13:49 +00005365
5366 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5367 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5368
5369 armnn::ResizeBilinearQueueDescriptor descriptor;
James Conroy074f3712018-10-03 09:32:03 +01005370 descriptor.m_Parameters.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +00005371 armnn::WorkloadInfo info;
5372 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5373 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5374
5375 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
5376
5377 inputHandle->Allocate();
5378 outputHandle->Allocate();
5379 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
5380
Derek Lambertif30f7d32019-04-09 10:25:02 +01005381 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005382 workload->Execute();
5383
5384 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5385 return result;
5386}
5387
// Checks bilinear shrinking of a 1x2x3x5 input down to 2x3, for either NCHW or
// NHWC data layout. The expected values encode the behaviour of the reference
// bilinear implementation for this shape.
// Note: memoryManager is accepted for signature consistency with the other
// layer tests but is not used here.
LayerTestResult<float, 4> ResizeBilinearMinTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    // GetTensorInfo builds the shape for the requested layout from the
    // (batch, channels, height, width) arguments.
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);

    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);

    // Reference data is authored in NCHW order: channel 0, then channel 1
    // (channel 1 is channel 0 mirrored).
    std::vector<float> inputData({
        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,

        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
        89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
        8.0f, 5.0f, 3.0f, 2.0f, 1.0f
    });

    // Pre-computed bilinear result for the 3x5 -> 2x3 resize.
    std::vector<float> outputData({
        1.0f, 2.6666f, 6.00f,
        78.5f, 179.3333f, 401.00f,

        987.0f, 454.6670f, 203.33f,
        48.5f, 22.3333f, 10.00f
    });

    // If the test runs in NHWC, permute both buffers so they match the
    // layout of the tensor infos above.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Configuration step required between allocation and execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5455
// Checks bilinear magnification: a 1x2x3x2 input is widened to 3x5 (height is
// unchanged; only the width is scaled up), for either NCHW or NHWC layout.
// Note: memoryManager is accepted for signature consistency with the other
// layer tests but is not used here.
LayerTestResult<float, 4> ResizeBilinearMagTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    // GetTensorInfo builds the shape for the requested layout from the
    // (batch, channels, height, width) arguments.
    const armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);

    const armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);

    // Reference data is authored in NCHW order: channel 0, then channel 1
    // (channel 1 is channel 0 mirrored).
    std::vector<float> inputData({
        1.0f, 2.0f,
        13.0f, 21.0f,
        144.0f, 233.0f,

        233.0f, 144.0f,
        21.0f, 13.0f,
        2.0f, 1.0f
    });

    // Pre-computed bilinear result for the 2 -> 5 width magnification; the
    // last two columns clamp to the right-hand input sample.
    std::vector<float> outputData({
        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,

        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
        21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
        2.0f, 1.6f, 1.2f, 1.0f, 1.0f
    });

    // If the test runs in NHWC, permute both buffers so they match the
    // layout of the tensor infos above.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<float> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
        inputData = tmp;

        std::vector<float> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
        outputData = tmp1;
    }

    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);

    LayerTestResult<float, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Configuration step required between allocation and execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
5525
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005526LayerTestResult<float, 2> FakeQuantizationTest(
5527 armnn::IWorkloadFactory& workloadFactory,
5528 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00005529{
5530 constexpr unsigned int width = 2;
5531 constexpr unsigned int height = 3;
5532
5533 const armnn::TensorInfo tensorInfo({height, width },
5534 armnn::DataType::Float32);
5535 auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5536 -10.0f, -5.0f,
5537 0.0f, 5.0f,
5538 10.0f, 10.0f
5539 }));
5540
5541 LayerTestResult<float, 2> ret(tensorInfo);
5542
5543 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5544
5545 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
5546
5547 armnn::FakeQuantizationQueueDescriptor data;
5548 armnn::WorkloadInfo info;
5549
5550 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
5551 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
5552 float min = -10.f;
5553 float max = 10.f;
5554
5555 data.m_Parameters.m_Min = min;
5556 data.m_Parameters.m_Max = max;
5557
5558 armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
5559 armnn::FakeQuantizationQueueDescriptor refData = data;
5560 armnn::WorkloadInfo refInfo = info;
5561 SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
5562
5563 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
5564
5565 inputHandle->Allocate();
5566 outputHandle->Allocate();
5567
5568 CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
5569
Derek Lambertif30f7d32019-04-09 10:25:02 +01005570 workload->PostAllocationConfigure();
telsoa014fcda012018-03-09 14:13:49 +00005571 workload->Execute();
5572
5573 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
5574
5575 ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
5576 0.0f, 63.0f,
5577 128.0f, 191.0f,
5578 255.0f, 255.0f
5579 }));
5580 return ret;
5581}
5582
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005583namespace
5584{
5585
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005586LayerTestResult<float, 4> L2NormalizationTestImpl(
5587 armnn::IWorkloadFactory& workloadFactory,
5588 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5589 const armnn::TensorShape& inputOutputTensorShape,
5590 const std::vector<float>& inputValues,
5591 const std::vector<float>& expectedOutputValues,
Matthew Bentham8800c002018-11-19 13:19:28 +00005592 const armnn::DataLayout layout)
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005593{
5594 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5595 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
5596
jimfly013aab7c32018-11-12 13:32:08 +00005597 // at this point if we require it permute the input data
5598 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
5599 std::vector<float> inputData = inputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005600 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005601 {
5602 std::vector<float> tmp(inputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005603 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005604 inputData = tmp;
5605 }
5606
5607 auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005608
5609 LayerTestResult<float, 4> result(outputTensorInfo);
jimfly013aab7c32018-11-12 13:32:08 +00005610 std::vector<float> expectedOutputData = expectedOutputValues;
Matthew Bentham8800c002018-11-19 13:19:28 +00005611 if (layout == armnn::DataLayout::NHWC)
jimfly013aab7c32018-11-12 13:32:08 +00005612 {
5613 std::vector<float> tmp(expectedOutputData.size());
Matteo Martincighd5b9e642019-01-04 18:01:21 +00005614 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
5615 expectedOutputData.data(), tmp.data(), sizeof(float));
jimfly013aab7c32018-11-12 13:32:08 +00005616 expectedOutputData = tmp;
5617 }
5618 result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005619
5620 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5621 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
5622
5623 armnn::L2NormalizationQueueDescriptor descriptor;
Matthew Bentham8800c002018-11-19 13:19:28 +00005624 descriptor.m_Parameters.m_DataLayout = layout;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005625 armnn::WorkloadInfo info;
5626
5627 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5628 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
5629
5630 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
5631
5632 inputHandle->Allocate();
5633 outputHandle->Allocate();
5634
5635 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
5636
Derek Lambertif30f7d32019-04-09 10:25:02 +01005637 workload->PostAllocationConfigure();
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005638 ExecuteWorkload(*workload, memoryManager);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01005639
5640 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
5641
5642 return result;
5643}
5644
5645float CalcInvL2Norm(std::initializer_list<float> elements)
5646{
5647 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
5648 [](float acc, float element) { return acc + element * element; });
5649 return 1.0f / sqrtf(reduction);
5650}
5651
5652} // anonymous namespace
5653
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005654template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005655LayerTestResult<T, 2> Pad2dTestCommon(
5656 armnn::IWorkloadFactory& workloadFactory,
5657 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5658 float qScale,
5659 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005660{
Derek Lambertif30f7d32019-04-09 10:25:02 +01005661 const armnn::TensorShape inputShape{ 3, 3 };
5662 const armnn::TensorShape outputShape{ 7, 7 };
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005663
Derek Lambertif30f7d32019-04-09 10:25:02 +01005664 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5665 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005666
Derek Lambertif30f7d32019-04-09 10:25:02 +01005667 std::vector<T> inputValues(
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005668 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005669 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005670 // Height (3) x Width (3)
5671 4, 8, 6,
5672 7, 4, 4,
5673 3, 2, 4
5674 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005675
Derek Lambertif30f7d32019-04-09 10:25:02 +01005676 std::vector<T> expectedOutputValues(
5677 QuantizedVector<T>(qScale, qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005678 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005679 0, 0, 0, 0, 0, 0, 0,
5680 0, 0, 0, 0, 0, 0, 0,
5681 0, 0, 4, 8, 6, 0, 0,
5682 0, 0, 7, 4, 4, 0, 0,
5683 0, 0, 3, 2, 4, 0, 0,
5684 0, 0, 0, 0, 0, 0, 0,
5685 0, 0, 0, 0, 0, 0, 0
5686 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005687
Derek Lambertif30f7d32019-04-09 10:25:02 +01005688 auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005689
Derek Lambertif30f7d32019-04-09 10:25:02 +01005690 LayerTestResult<T, 2> result(outputTensorInfo);
5691 result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005692
Derek Lambertif30f7d32019-04-09 10:25:02 +01005693 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
5694 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005695
Derek Lambertif30f7d32019-04-09 10:25:02 +01005696 armnn::PadQueueDescriptor descriptor;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005697
Derek Lambertif30f7d32019-04-09 10:25:02 +01005698 std::vector<std::pair<unsigned int, unsigned int>> PadList;
5699 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
5700 PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005701
Derek Lambertif30f7d32019-04-09 10:25:02 +01005702 descriptor.m_Parameters.m_PadList = PadList;
5703 armnn::WorkloadInfo info;
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005704
Derek Lambertif30f7d32019-04-09 10:25:02 +01005705 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
5706 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005707
Derek Lambertif30f7d32019-04-09 10:25:02 +01005708 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005709
Derek Lambertif30f7d32019-04-09 10:25:02 +01005710 inputHandle->Allocate();
5711 outputHandle->Allocate();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005712
Derek Lambertif30f7d32019-04-09 10:25:02 +01005713 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005714
Derek Lambertif30f7d32019-04-09 10:25:02 +01005715 workload->PostAllocationConfigure();
5716 workload->Execute();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005717
Derek Lambertif30f7d32019-04-09 10:25:02 +01005718 CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005719
Derek Lambertif30f7d32019-04-09 10:25:02 +01005720 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005721}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005722
// Pads a quantised 2x2x2 tensor with zeros, producing a 3x5x6 output, and
// checks the result. The per-dimension (before, after) padding is
// (0,1), (2,1), (2,2) — see the PadList below. qScale/qOffset parameterise
// the quantisation applied to both buffers.
// Note: memoryManager is accepted for signature consistency with the other
// layer tests but is not used here.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const armnn::TensorShape inputShape{ 2, 2, 2 };
    const armnn::TensorShape outputShape{ 3, 5, 6 };

    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);

    std::vector<T> inputValues(
      QuantizedVector<T>(qScale,qOffset,
    {
        // Channel 0, Height (2) x Width (2)
        0, 4,
        2, 5,

        // Channel 1, Height (2) x Width (2)
        6, 1,
        5, 2
    }));

    // Expected output: each input channel shifted right/down by the front
    // padding (2 rows, 2 columns); the trailing all-zero 5x6 plane comes from
    // the single element of back-padding on the channel dimension.
    std::vector<T> expectedOutputValues(
      QuantizedVector<T>(qScale,qOffset,
    {

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 4, 0, 0,
        0, 0, 2, 5, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 6, 1, 0, 0,
        0, 0, 5, 2, 0, 0,
        0, 0, 0, 0, 0, 0,

        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0

    }));

    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));

    LayerTestResult<T, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::PadQueueDescriptor descriptor;

    // (before, after) padding for each of the three dimensions.
    std::vector<std::pair<unsigned int, unsigned int>> PadList;
    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));

    descriptor.m_Parameters.m_PadList = PadList;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    // Configuration step required between allocation and execution.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005807
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005808template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00005809LayerTestResult<T, 4> Pad4dTestCommon(
5810 armnn::IWorkloadFactory& workloadFactory,
5811 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
5812 float qScale,
5813 int32_t qOffset)
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005814{
5815 const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
5816 const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
5817
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00005818 const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
5819 const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005820
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005821 std::vector<T> inputValues(
5822 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005823 {
5824 // Batch 0, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005825 0, 1,
5826 2, 3,
5827 4, 5,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005828
5829 // Batch 0, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005830 6, 7,
5831 8, 9,
5832 10, 11,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005833
5834 // Batch 1, Channel 0, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005835 12, 13,
5836 14, 15,
5837 16, 17,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005838
5839 // Batch 1, Channel 1, Height (3) x Width (2)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005840 18, 19,
5841 20, 21,
5842 22, 23
5843 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005844
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005845 std::vector<T> expectedOutputValues(
5846 QuantizedVector<T>(qScale,qOffset,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005847 {
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005848 0, 0, 0, 0,
5849 0, 0, 0, 0,
5850 0, 0, 0, 0,
5851 0, 0, 0, 0,
5852 0, 0, 0, 0,
5853 0, 0, 0, 0,
5854 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005855
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005856 0, 0, 0, 0,
5857 0, 0, 0, 0,
5858 0, 0, 0, 0,
5859 0, 0, 0, 0,
5860 0, 0, 0, 0,
5861 0, 0, 0, 0,
5862 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005863
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005864 0, 0, 0, 0,
5865 0, 0, 0, 0,
5866 0, 0, 0, 0,
5867 0, 0, 0, 0,
5868 0, 0, 0, 0,
5869 0, 0, 0, 0,
5870 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005871
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005872 0, 0, 0, 0,
5873 0, 0, 0, 0,
5874 0, 0, 0, 0,
5875 0, 0, 0, 0,
5876 0, 0, 0, 0,
5877 0, 0, 0, 0,
5878 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005879
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005880 0, 0, 0, 0,
5881 0, 0, 0, 0,
5882 0, 0, 0, 0,
5883 0, 0, 0, 0,
5884 0, 0, 0, 0,
5885 0, 0, 0, 0,
5886 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005887
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005888 0, 0, 0, 0,
5889 0, 0, 0, 0,
5890 0, 0, 0, 0,
5891 0, 0, 0, 0,
5892 0, 0, 0, 0,
5893 0, 0, 0, 0,
5894 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005895
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005896 0, 0, 0, 0,
5897 0, 0, 0, 0,
5898 0, 0, 0, 0,
5899 0, 0, 0, 0,
5900 0, 0, 0, 0,
5901 0, 0, 0, 0,
5902 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005903
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005904 0, 0, 0, 0,
5905 0, 0, 0, 0,
5906 0, 0, 0, 0,
5907 0, 0, 1, 0,
5908 0, 2, 3, 0,
5909 0, 4, 5, 0,
5910 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005911
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005912 0, 0, 0, 0,
5913 0, 0, 0, 0,
5914 0, 0, 0, 0,
5915 0, 6, 7, 0,
5916 0, 8, 9, 0,
5917 0, 10, 11, 0,
5918 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005919
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005920 0, 0, 0, 0,
5921 0, 0, 0, 0,
5922 0, 0, 0, 0,
5923 0, 0, 0, 0,
5924 0, 0, 0, 0,
5925 0, 0, 0, 0,
5926 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005927
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005928 0, 0, 0, 0,
5929 0, 0, 0, 0,
5930 0, 0, 0, 0,
5931 0, 0, 0, 0,
5932 0, 0, 0, 0,
5933 0, 0, 0, 0,
5934 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005935
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005936 0, 0, 0, 0,
5937 0, 0, 0, 0,
5938 0, 0, 0, 0,
5939 0, 0, 0, 0,
5940 0, 0, 0, 0,
5941 0, 0, 0, 0,
5942 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005943
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005944 0, 0, 0, 0,
5945 0, 0, 0, 0,
5946 0, 0, 0, 0,
5947 0, 12, 13, 0,
5948 0, 14, 15, 0,
5949 0, 16, 17, 0,
5950 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005951
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005952 0, 0, 0, 0,
5953 0, 0, 0, 0,
5954 0, 0, 0, 0,
5955 0, 18, 19, 0,
5956 0, 20, 21, 0,
5957 0, 22, 23, 0,
5958 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005959
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005960 0, 0, 0, 0,
5961 0, 0, 0, 0,
5962 0, 0, 0, 0,
5963 0, 0, 0, 0,
5964 0, 0, 0, 0,
5965 0, 0, 0, 0,
5966 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005967
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005968 0, 0, 0, 0,
5969 0, 0, 0, 0,
5970 0, 0, 0, 0,
5971 0, 0, 0, 0,
5972 0, 0, 0, 0,
5973 0, 0, 0, 0,
5974 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005975
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005976 0, 0, 0, 0,
5977 0, 0, 0, 0,
5978 0, 0, 0, 0,
5979 0, 0, 0, 0,
5980 0, 0, 0, 0,
5981 0, 0, 0, 0,
5982 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005983
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005984 0, 0, 0, 0,
5985 0, 0, 0, 0,
5986 0, 0, 0, 0,
5987 0, 0, 0, 0,
5988 0, 0, 0, 0,
5989 0, 0, 0, 0,
5990 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005991
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01005992 0, 0, 0, 0,
5993 0, 0, 0, 0,
5994 0, 0, 0, 0,
5995 0, 0, 0, 0,
5996 0, 0, 0, 0,
5997 0, 0, 0, 0,
5998 0, 0, 0, 0,
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01005999
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006000 0, 0, 0, 0,
6001 0, 0, 0, 0,
6002 0, 0, 0, 0,
6003 0, 0, 0, 0,
6004 0, 0, 0, 0,
6005 0, 0, 0, 0,
6006 0, 0, 0, 0
6007 }));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006008
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006009 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006010
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006011 LayerTestResult<T, 4> result(outputTensorInfo);
6012 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006013
6014 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
6015 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
6016
6017 armnn::PadQueueDescriptor descriptor;
6018
6019 std::vector<std::pair<unsigned int, unsigned int>> PadList;
6020 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6021 PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
6022 PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
6023 PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
6024
6025 descriptor.m_Parameters.m_PadList = PadList;
6026 armnn::WorkloadInfo info;
6027
6028 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
6029 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
6030
6031 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
6032
6033 inputHandle->Allocate();
6034 outputHandle->Allocate();
6035
6036 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
6037
Derek Lambertif30f7d32019-04-09 10:25:02 +01006038 workload->PostAllocationConfigure();
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006039 workload->Execute();
6040
6041 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
6042
6043 return result;
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006044}
6045
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006046LayerTestResult<uint8_t, 2> PadUint82dTest(
6047 armnn::IWorkloadFactory& workloadFactory,
6048 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006049{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006050 return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006051}
6052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006053LayerTestResult<uint8_t, 3> PadUint83dTest(
6054 armnn::IWorkloadFactory& workloadFactory,
6055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006056{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006057 return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006058}
6059
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006060LayerTestResult<uint8_t, 4> PadUint84dTest(
6061 armnn::IWorkloadFactory& workloadFactory,
6062 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006063{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006064 return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006065}
6066
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006067LayerTestResult<float, 2> PadFloat322dTest(
6068 armnn::IWorkloadFactory& workloadFactory,
6069 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006070{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006071 return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006072}
6073
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006074LayerTestResult<float, 3> PadFloat323dTest(
6075 armnn::IWorkloadFactory& workloadFactory,
6076 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006077{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006078 return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006079}
6080
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006081LayerTestResult<float, 4> PadFloat324dTest(
6082 armnn::IWorkloadFactory& workloadFactory,
6083 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006084{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006085 return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Mohamed Nour Abouelseouddd6acea2018-10-18 12:26:19 +01006086}
Mohamed Nour Abouelseoud7420e552018-10-12 12:26:24 +01006087
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006088LayerTestResult<float, 4> L2Normalization1dTest(
6089 armnn::IWorkloadFactory& workloadFactory,
6090 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006091 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006092{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006093 // Width: 1
6094 // Height: 1
6095 // Channels: 10
6096 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006097 unsigned int numberOfBatches = 1;
6098 unsigned int numberOfChannels = 10;
6099 unsigned int height = 1;
6100 unsigned int width = 1;
telsoa014fcda012018-03-09 14:13:49 +00006101
jimfly013aab7c32018-11-12 13:32:08 +00006102
Nina Drozdd41b2592018-11-19 13:03:36 +00006103 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006104 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006105 std::vector<float> inputValues
6106 {
6107 // Batch 0, Channel 0, Height (1) x Width (1)
6108 1.0f,
telsoa014fcda012018-03-09 14:13:49 +00006109
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006110 // Batch 0, Channel 1, Height (1) x Width (1)
6111 2.0f,
telsoa014fcda012018-03-09 14:13:49 +00006112
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006113 // Batch 0, Channel 2, Height (1) x Width (1)
6114 3.0f,
telsoa014fcda012018-03-09 14:13:49 +00006115
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006116 // Batch 0, Channel 3, Height (1) x Width (1)
6117 4.0f,
6118
6119 // Batch 0, Channel 4, Height (1) x Width (1)
6120 5.0f,
6121
6122 // Batch 0, Channel 5, Height (1) x Width (1)
6123 6.0f,
6124
6125 // Batch 0, Channel 6, Height (1) x Width (1)
6126 7.0f,
6127
6128 // Batch 0, Channel 7, Height (1) x Width (1)
6129 8.0f,
6130
6131 // Batch 0, Channel 8, Height (1) x Width (1)
6132 9.0f,
6133
6134 // Batch 0, Channel 9, Height (1) x Width (1)
6135 10.0f
6136 };
telsoa014fcda012018-03-09 14:13:49 +00006137 const float approxInvL2Norm = 0.050964719f;
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006138 std::vector<float> expectedOutputValues
6139 {
6140 // Batch 0, Channel 0, Height (1) x Width (1)
telsoa014fcda012018-03-09 14:13:49 +00006141 1.0f * approxInvL2Norm,
6142 2.0f * approxInvL2Norm,
6143 3.0f * approxInvL2Norm,
6144 4.0f * approxInvL2Norm,
6145 5.0f * approxInvL2Norm,
6146 6.0f * approxInvL2Norm,
6147 7.0f * approxInvL2Norm,
6148 8.0f * approxInvL2Norm,
6149 9.0f * approxInvL2Norm,
6150 10.0f * approxInvL2Norm
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006151 };
telsoa014fcda012018-03-09 14:13:49 +00006152
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006153
6154 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006155 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006156}
6157
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006158LayerTestResult<float, 4> L2Normalization2dTest(
6159 armnn::IWorkloadFactory& workloadFactory,
6160 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006161 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006162{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006163 // Width: 5
6164 // Height: 1
6165 // Channels: 2
6166 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006167 unsigned int numberOfBatches = 1;
6168 unsigned int numberOfChannels = 2;
6169 unsigned int height = 1;
6170 unsigned int width = 5;
telsoa014fcda012018-03-09 14:13:49 +00006171
Nina Drozdd41b2592018-11-19 13:03:36 +00006172 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006173 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006174 std::vector<float> inputValues
6175 {
6176 // Batch 0, Channel 0, Height (1) x Width (5)
telsoa014fcda012018-03-09 14:13:49 +00006177 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
telsoa014fcda012018-03-09 14:13:49 +00006178
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006179 // Batch 0, Channel 1, Height (1) x Width (5)
6180 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
6181 };
6182 std::vector<float> expectedOutputValues
6183 {
6184 // Batch 0, Channel 0, Height (1) x Width (5)
6185 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6186 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6187 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6188 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006189 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
6190
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006191 // Batch 0, Channel 1, Height (1) x Width (5)
6192 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
6193 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
6194 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
6195 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
telsoa014fcda012018-03-09 14:13:49 +00006196 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006197 };
telsoa014fcda012018-03-09 14:13:49 +00006198
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006199 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006200 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006201}
telsoa014fcda012018-03-09 14:13:49 +00006202
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006203LayerTestResult<float, 4> L2Normalization3dTest(
6204 armnn::IWorkloadFactory& workloadFactory,
6205 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006206 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006207{
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006208 // Width: 3
6209 // Height: 4
6210 // Channels: 2
6211 // BatchSize: 1
jimfly013aab7c32018-11-12 13:32:08 +00006212 unsigned int numberOfBatches = 1;
6213 unsigned int numberOfChannels = 2;
6214 unsigned int height = 4;
6215 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006216
Nina Drozdd41b2592018-11-19 13:03:36 +00006217 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006218 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006219 std::vector<float> inputValues
6220 {
6221 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006222 119.0f, 21.0f, 150.0f,
6223 149.0f, 32.0f, 179.0f,
6224 15.0f, 227.0f, 141.0f,
6225 147.0f, 199.0f, 220.0f,
6226
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006227 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006228 110.0f, 140.0f, 73.0f,
6229 211.0f, 212.0f, 89.0f,
6230 24.0f, 138.0f, 188.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006231 162.0f, 12.0f, 161.0f
6232 };
6233 std::vector<float> expectedOutputValues
6234 {
6235 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006236 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6237 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6238 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6239 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6240 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6241 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6242 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6243 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6244 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6245 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6246 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
6247 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
6248
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006249 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006250 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
6251 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
6252 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
6253 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
6254 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
6255 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
6256 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
6257 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
6258 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
6259 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
6260 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006261 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
6262 };
telsoa014fcda012018-03-09 14:13:49 +00006263
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006264 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006265 inputValues, expectedOutputValues, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006266}
telsoa014fcda012018-03-09 14:13:49 +00006267
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006268LayerTestResult<float, 4> L2Normalization4dTest(
6269 armnn::IWorkloadFactory& workloadFactory,
6270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00006271 const armnn::DataLayout layout)
telsoa014fcda012018-03-09 14:13:49 +00006272{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006273 // Width: 3
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006274 // Height: 4
6275 // Channels: 3
6276 // BatchSize: 2
jimfly013aab7c32018-11-12 13:32:08 +00006277 unsigned int numberOfBatches = 2;
6278 unsigned int numberOfChannels = 3;
6279 unsigned int height = 4;
6280 unsigned int width = 3;
telsoa014fcda012018-03-09 14:13:49 +00006281
Nina Drozdd41b2592018-11-19 13:03:36 +00006282 const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
jimfly013aab7c32018-11-12 13:32:08 +00006283 numberOfBatches, numberOfChannels, height, width, layout);
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006284 std::vector<float> inputValues
6285 {
6286 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006287 235.0f, 46.0f, 178.0f,
6288 100.0f, 123.0f, 19.0f,
6289 172.0f, 74.0f, 250.0f,
6290 6.0f, 195.0f, 80.0f,
6291
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006292 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006293 113.0f, 95.0f, 202.0f,
6294 77.0f, 114.0f, 71.0f,
6295 122.0f, 246.0f, 166.0f,
6296 82.0f, 28.0f, 37.0f,
6297
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006298 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006299 56.0f, 170.0f, 162.0f,
6300 194.0f, 89.0f, 254.0f,
6301 12.0f, 209.0f, 200.0f,
6302 1.0f, 64.0f, 54.0f,
6303
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006304 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006305 67.0f, 90.0f, 49.0f,
6306 7.0f, 163.0f, 18.0f,
6307 25.0f, 117.0f, 103.0f,
6308 247.0f, 59.0f, 189.0f,
6309
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006310 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006311 239.0f, 104.0f, 199.0f,
6312 17.0f, 124.0f, 153.0f,
6313 222.0f, 217.0f, 75.0f,
6314 32.0f, 126.0f, 21.0f,
6315
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006316 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006317 97.0f, 145.0f, 215.0f,
6318 115.0f, 116.0f, 238.0f,
6319 226.0f, 16.0f, 132.0f,
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006320 92.0f, 125.0f, 88.0f
6321 };
6322 std::vector<float> expectedOutputValues
6323 {
6324 // Batch 0, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006325 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6326 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6327 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6328 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6329 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6330 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6331 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6332 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6333 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6334 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6335 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6336 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6337
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006338 // Batch 0, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006339 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6340 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6341 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6342 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6343 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6344 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6345 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6346 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6347 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6348 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6349 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6350 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6351
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006352 // Batch 0, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006353 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
6354 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
6355 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
6356 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
6357 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
6358 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
6359 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
6360 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
6361 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
6362 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
6363 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
6364 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
6365
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006366 // Batch 1, Channel 0, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006367 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6368 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6369 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6370 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6371 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6372 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6373 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6374 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6375 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6376 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6377 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6378 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6379
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006380 // Batch 1, Channel 1, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006381 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6382 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6383 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6384 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6385 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6386 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6387 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6388 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6389 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6390 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6391 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
6392 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
6393
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006394 // Batch 1, Channel 2, Height (4) x Width (3)
telsoa014fcda012018-03-09 14:13:49 +00006395 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
6396 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
6397 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
6398 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
6399 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
6400 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
6401 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
6402 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
6403 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
6404 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
6405 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
Matteo Martincigh539b44d2018-10-01 09:26:39 +01006406 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
6407 };
telsoa014fcda012018-03-09 14:13:49 +00006408
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006409 return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
jimfly013aab7c32018-11-12 13:32:08 +00006410 inputValues, expectedOutputValues, layout);
telsoa014fcda012018-03-09 14:13:49 +00006411}
6412
// Builds a Constant workload that emits a fixed 2x3x4x3 (NCHW) tensor and
// verifies the workload reproduces that tensor unchanged: the expected output
// is the input data itself.
// qScale/qOffset are applied to both tensor infos; they only matter when the
// resolved element type T is a quantized type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    // NOTE(review): memoryManager is unused here; kept so the signature matches
    // the other test helpers in this file.
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    // A Constant layer copies its stored tensor, so output dims equal input dims.
    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // NOTE(review): the TensorInfo constructors above already received
    // qScale/qOffset, so this re-applies the same values; redundant but harmless.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Constant payload, quantized into T via the test helper QuantizedVector.
    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
        // Batch 0, Channel 0
        235.0f, 46.0f, 178.0f,
        100.0f, 123.0f, 19.0f,
        172.0f, 74.0f, 250.0f,
        6.0f, 195.0f, 80.0f,

        // Batch 0, Channel 1
        113.0f, 95.0f, 202.0f,
        77.0f, 114.0f, 71.0f,
        122.0f, 246.0f, 166.0f,
        82.0f, 28.0f, 37.0f,

        // Batch 0, Channel 2
        56.0f, 170.0f, 162.0f,
        194.0f, 89.0f, 254.0f,
        12.0f, 209.0f, 200.0f,
        1.0f, 64.0f, 54.0f,

        // Batch 1, Channel 0
        67.0f, 90.0f, 49.0f,
        7.0f, 163.0f, 18.0f,
        25.0f, 117.0f, 103.0f,
        247.0f, 59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
        17.0f, 124.0f, 153.0f,
        222.0f, 217.0f, 75.0f,
        32.0f, 126.0f, 21.0f,

        // Batch 1, Channel 2
        97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f, 16.0f, 132.0f,
        92.0f, 125.0f, 88.0f,
    })));

    LayerTestResult<T, 4> result(outputTensorInfo);
    // The workload should reproduce the constant data verbatim.
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // The Constant layer owns its data through a ScopedCpuTensorHandle, which
    // the queue descriptor points at (m_LayerOutput).
    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // Constant workloads have no inputs — only the output is registered.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);

    outputHandle->Allocate();

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
6508
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006509LayerTestResult<float, 4> ConstantTest(
6510 armnn::IWorkloadFactory& workloadFactory,
6511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006512{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006513 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006514}
6515
Nina Drozd58ef2c62019-05-16 12:09:18 +01006516LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
6517 armnn::IWorkloadFactory& workloadFactory,
6518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
6519{
6520 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
6521}
6522
6523LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00006524 armnn::IWorkloadFactory& workloadFactory,
6525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00006526{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00006527 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
telsoa014fcda012018-03-09 14:13:49 +00006528}
6529
// Concatenates two QAsymm8 tensors (2x6x3 and 1x6x3) along the channel
// dimension into a 3x6x3 output, where each input uses different quantization
// parameters. The output shares input1's parameters, so the Concat workload
// must requantize only input2's data.
LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Quantized input1 tensor. Range [-3, 1].
    const float inputScale1 = 0.015686f;
    const int32_t inputOffset1 = 192;

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    // Quantized input2 tensor. Range [-1, 4].
    const float inputScale2 = 0.019608f;
    const int32_t inputOffset2 = 50;

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    // Output has the same quantization parameters as input1,
    // so that only the requantization of input2 is required.
    const float outputScale = 0.015686f;
    const int32_t outputOffset = 192;

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Channels 0-1 pass through unchanged (same qparams as output); channel 2
    // holds input2's values requantized from (scale2, offset2) to the output's
    // (scale, offset).
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        176, 177, 178,
        179, 181, 182,
        183, 184, 186,
        187, 188, 189,
        191, 192, 193,
        195, 196, 197,
    })
    );

    outputTensorInfo.SetQuantizationScale(outputScale);
    outputTensorInfo.SetQuantizationOffset(outputOffset);
    inputTensorInfo1.SetQuantizationScale(inputScale1);
    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
    inputTensorInfo2.SetQuantizationScale(inputScale2);
    inputTensorInfo2.SetQuantizationOffset(inputOffset2);

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    // Second view starts at channel 2, directly after input1's two channels.
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // When the backend supports sub-tensors, the inputs are created as views
    // into the output handle at the window origins; otherwise as standalone
    // handles that the workload copies from.
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
            subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);

    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after Allocate and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6672
// Concatenates a [2,6,3] and a [1,6,3] QuantisedAsymm8 tensor along the
// channel dimension into a [3,6,3] output and compares against the expected
// result. Where the backend supports sub-tensors, the inputs are created as
// sub-tensor views of the output so the concat needs no extra copy.
LayerTestResult<uint8_t, 3> ConcatUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    // All three tensors share the same quantization parameters.
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    })
    );

    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    })
    );

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Inputs are either views into the output (zero-copy) or standalone handles,
    // depending on the backend's sub-tensor support.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // One view origin per input, in the same order as the inputs were added.
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after the handles are allocated and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
6808
// Concatenates a [2,6,3] and a [1,6,3] QuantisedSymm16 tensor along the
// channel dimension into a [3,6,3] output and compares against the expected
// result. Mirrors ConcatUint8Test but with 16-bit data (note the test buffers
// use uint16_t elements for the QuantisedSymm16 tensors).
LayerTestResult<uint16_t, 3> ConcatUint16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int outputWidth = 3;
    unsigned int outputHeight = 6;
    unsigned int outputChannels = 3;

    unsigned int inputWidth1 = 3;
    unsigned int inputHeight1 = 6;
    unsigned int inputChannels1 = 2;

    unsigned int inputWidth2 = 3;
    unsigned int inputHeight2 = 6;
    unsigned int inputChannels2 = 1;

    // Defines the tensor descriptors.
    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);

    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
    const float scale = 0.13497836f;
    const int32_t offset = -7;

    // All three tensors share the same quantization parameters.
    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);
    inputTensorInfo1.SetQuantizationScale(scale);
    inputTensorInfo1.SetQuantizationOffset(offset);
    inputTensorInfo2.SetQuantizationScale(scale);
    inputTensorInfo2.SetQuantizationOffset(offset);

    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);

    // Expected output: input1's two channels followed by input2's single channel.
    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,

        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,
        10, 11, 12,
        13, 14, 15,
        16, 17, 18,

        19, 20, 21,
        22, 23, 24,
        25, 26, 27,
        28, 29, 30,
        31, 32, 33,
        34, 35, 36,
    }));

    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
    {
        37, 38, 39,
        40, 41, 42,
        43, 44, 45,
        46, 47, 48,
        49, 50, 51,
        52, 53, 54,
    }));

    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);


    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    // Inputs are either views into the output (zero-copy) or standalone handles,
    // depending on the backend's sub-tensor support.
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);


    armnn::ConcatQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // One view origin per input, in the same order as the inputs were added.
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    // PostAllocationConfigure must run after the handles are allocated and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}
telsoa014fcda012018-03-09 14:13:49 +00006941
namespace
{
// Generic helper for the quantized Addition tests below.
// The tensor data type is derived from the element type T: uint8_t selects
// QuantisedAsymm8, any other T selects QuantisedSymm16.
// Builds the two 4-D input tensors and the output tensor with the given shapes
// and quantization parameters, runs an Addition workload on the given backend,
// and returns the actual output together with the expected values so the
// caller can compare them.
template <typename T>
LayerTestResult<T, 4> AdditionQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    // uint8_t maps to QuantisedAsymm8; every other element type is treated as QuantisedSymm16.
    auto dataType = (std::is_same<T, uint8_t>::value ?
                     armnn::DataType::QuantisedAsymm8 :
                     armnn::DataType::QuantisedSymm16);

    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after the handles are allocated and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7011
7012LayerTestResult<uint8_t, 4> AdditionUint8Test(
7013 armnn::IWorkloadFactory& workloadFactory,
7014 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7015{
7016 const unsigned int shape0[] = { 1, 2, 2, 3 };
7017 const unsigned int shape1[] = { 1, 2, 2, 3 };
7018
7019 std::vector<uint8_t> input0(
7020 {
7021 63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
7022 203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
7023 });
7024
7025 std::vector<uint8_t> input1(
7026 {
7027 21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
7028 126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
7029 });
7030
7031 std::vector<uint8_t> output(
7032 {
7033 81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
7034 255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
7035 });
7036
7037 return AdditionQuantizeTestHelper(workloadFactory,
7038 memoryManager,
7039 shape0, input0, 7.0f, 3,
7040 shape1, input1, 7.0f, 3,
7041 shape0, output, 7.0f, 3);
7042}
7043
// Element-wise addition of two QuantisedSymm16 tensors of identical shape.
// All tensors use scale 7.0 and offset 0; the dequantized values are shown in
// the comments. Unlike the uint8 variant, none of the sums saturate here.
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0(
    {
        63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
        203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
    });

    std::vector<int16_t> input1(
    {
        21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
        126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
    });

    std::vector<int16_t> output(
    {
        84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    });

    return AdditionQuantizeTestHelper(workloadFactory,
                                      memoryManager,
                                      shape0, input0, 7.0f, 0,
                                      shape1, input1, 7.0f, 0,
                                      shape0, output, 7.0f, 0);
}
7075
namespace
{
// Generic helper for the quantized Multiplication tests below.
// ArmnnType selects the tensor data type (e.g. QuantisedAsymm8 or
// QuantisedSymm16); T is the matching C++ element type resolved from it.
// Builds the two 4-D input tensors and the output tensor with the given shapes
// and quantization parameters, runs a Multiplication workload on the given
// backend, and returns the actual output together with the expected values so
// the caller can compare them. Input shapes may differ to exercise broadcasting.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T> & values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after the handles are allocated and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7141
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007142LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
7143 armnn::IWorkloadFactory& workloadFactory,
7144 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007145{
7146 unsigned int batchSize = 1;
7147 unsigned int channels = 2;
7148 unsigned int height = 2;
7149 unsigned int width = 3;
7150 const unsigned int shape[] = { batchSize, channels, height, width };
7151
telsoa01c577f2c2018-08-31 09:22:23 +01007152 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007153 std::vector<uint8_t> input0({
7154 62, 37, 3, 172, 13, 111, // 244, 144, 8, 684, 48, 440,
7155 188, 20, 73, 31, 23, 31 // 748, 76, 288, 120, 88, 120
7156 });
7157
telsoa01c577f2c2018-08-31 09:22:23 +01007158 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007159 std::vector<uint8_t> input1({
7160 126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
7161 48, 115, 151, 79, 78, 97 // 150, 351, 459, 243, 240, 297
7162 });
7163
telsoa01c577f2c2018-08-31 09:22:23 +01007164 // See dequantized values to the right.
surmeh01bceff2f2018-03-29 16:29:27 +01007165 std::vector<uint8_t> output(
7166 {
7167 64, 72, 0, 255, 8, 236, // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
7168 77, 15, 92, 16, 10, 21, // 112200, 26676, 132192, 29160, 21120, 35640
7169 });
7170
Sadik Armagan2999a022019-04-09 14:20:12 +01007171 // Scale/offset chosen to have output values out of range.
7172 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7173 memoryManager,
7174 shape,
7175 input0,
7176 4.0f,
7177 1,
7178 shape,
7179 input1,
7180 3.0f,
7181 -2,
7182 shape,
7183 output,
7184 1366.255f,
7185 -5);
surmeh01bceff2f2018-03-29 16:29:27 +01007186}
7187
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007188LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
7189 armnn::IWorkloadFactory& workloadFactory,
7190 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007191{
7192 const unsigned int shape0[] = { 1, 2, 2, 3 };
7193 const unsigned int shape1[] = { 1, 1, 1, 1 };
7194
7195 std::vector<uint8_t> input0({
7196 1, 2, 3, 4, 5, 6,
7197 7, 8, 9, 10, 11, 12
7198 });
7199
7200 std::vector<uint8_t> input1({2});
7201
7202 std::vector<uint8_t> output({
7203 2, 4, 6, 8, 10, 12,
7204 14, 16, 18, 20, 22, 24
7205 });
7206
Sadik Armagan2999a022019-04-09 14:20:12 +01007207 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7208 memoryManager,
7209 shape0,
7210 input0,
7211 1.0f,
7212 0,
7213 shape1,
7214 input1,
7215 1.0f,
7216 0,
7217 shape0,
7218 output,
7219 1.0f,
7220 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007221}
7222
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007223LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
7224 armnn::IWorkloadFactory& workloadFactory,
7225 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01007226{
7227 const unsigned int shape0[] = { 1, 2, 2, 3 };
7228 const unsigned int shape1[] = { 1, 1, 1, 3 };
7229
7230 std::vector<uint8_t> input0({
7231 1, 2, 3, 4, 5, 6,
7232 7, 8, 9, 10, 11, 12
7233 });
7234
7235 std::vector<uint8_t> input1({1, 2, 3});
7236
7237 std::vector<uint8_t> output({
7238 1, 4, 9, 4, 10, 18,
7239 7, 16, 27, 10, 22, 36
7240 });
7241
Sadik Armagan2999a022019-04-09 14:20:12 +01007242 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7243 memoryManager,
7244 shape0,
7245 input0,
7246 1.0f,
7247 0,
7248 shape1,
7249 input1,
7250 1.0f,
7251 0,
7252 shape0,
7253 output,
7254 1.0f,
7255 0);
7256}
7257
7258LayerTestResult<int16_t, 4> MultiplicationInt16Test(
7259 armnn::IWorkloadFactory& workloadFactory,
7260 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7261{
7262 const unsigned int shape[] = { 1, 2, 2, 3 };
7263
7264 std::vector<int16_t> input0(
7265 {
7266 6, 7, 8, 9, 10, 11,
7267 12, 13, 14, 15, 16, 17
7268 });
7269
7270 std::vector<int16_t> input1(
7271 {
7272 1, 2, 3, 4, 5, 6,
7273 7, 8, 9, 10, 11, 12
7274 });
7275
7276 std::vector<int16_t> output(
7277 {
7278 6, 14, 24, 36, 50, 66,
7279 84, 104, 126, 150, 176, 204
7280 });
7281
7282 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7283 memoryManager,
7284 shape,
7285 input0,
7286 1.0f,
7287 0,
7288 shape,
7289 input1,
7290 1.0f,
7291 0,
7292 shape,
7293 output,
7294 1.0f,
7295 0);
7296}
7297
7298LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
7299 armnn::IWorkloadFactory& workloadFactory,
7300 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7301{
7302 const unsigned int shape0[] = { 1, 2, 2, 3 };
7303 const unsigned int shape1[] = { 1, 1, 1, 1 };
7304
7305 std::vector<int16_t> input0(
7306 {
7307 1, 2, 3, 4, 5, 6,
7308 7, 8, 9, 10, 11, 12
7309 });
7310
7311 std::vector<int16_t> input1({2});
7312
7313 std::vector<int16_t> output(
7314 {
7315 2, 4, 6, 8, 10, 12,
7316 14, 16, 18, 20, 22, 24
7317 });
7318
7319 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7320 memoryManager,
7321 shape0,
7322 input0,
7323 1.0f,
7324 0,
7325 shape1,
7326 input1,
7327 1.0f,
7328 0,
7329 shape0,
7330 output,
7331 1.0f,
7332 0);
7333}
7334
7335LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
7336 armnn::IWorkloadFactory& workloadFactory,
7337 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7338{
7339 const unsigned int shape0[] = { 1, 2, 2, 3 };
7340 const unsigned int shape1[] = { 1, 1, 1, 3 };
7341
7342 std::vector<int16_t> input0(
7343 {
7344 1, 2, 3, 4, 5, 6,
7345 7, 8, 9, 10, 11, 12
7346 });
7347
7348 std::vector<int16_t> input1({1, 2, 3});
7349
7350 std::vector<int16_t> output(
7351 {
7352 1, 4, 9, 4, 10, 18,
7353 7, 16, 27, 10, 22, 36
7354 });
7355
7356 return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7357 memoryManager,
7358 shape0,
7359 input0,
7360 1.0f,
7361 0,
7362 shape1,
7363 input1,
7364 1.0f,
7365 0,
7366 shape0,
7367 output,
7368 1.0f,
7369 0);
surmeh01bceff2f2018-03-29 16:29:27 +01007370}
telsoa014fcda012018-03-09 14:13:49 +00007371
namespace
{
// Generic helper for the Subtraction tests below (computes input0 - input1,
// as established by the callers' test data).
// ArmnnType selects the tensor data type (e.g. Float32, QuantisedAsymm8);
// T is the matching C++ element type resolved from it.
// Builds the two 4-D input tensors and the output tensor with the given shapes
// and quantization parameters, runs a Subtraction workload on the given
// backend, and returns the actual output together with the expected values so
// the caller can compare them. Input shapes may differ to exercise broadcasting.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SubtractionTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int shape0[4],
    const std::vector<T>& values0,
    float scale0,
    int32_t offset0,
    const unsigned int shape1[4],
    const std::vector<T> & values1,
    float scale1,
    int32_t offset1,
    const unsigned int outShape[4],
    const std::vector<T> & outValues,
    float outScale,
    int32_t outOffset)
{
    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);

    inputTensorInfo0.SetQuantizationScale(scale0);
    inputTensorInfo0.SetQuantizationOffset(offset0);

    inputTensorInfo1.SetQuantizationScale(scale1);
    inputTensorInfo1.SetQuantizationOffset(offset1);

    outputTensorInfo.SetQuantizationScale(outScale);
    outputTensorInfo.SetQuantizationOffset(outOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);

    LayerTestResult<T, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::SubtractionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);

    // PostAllocationConfigure must run after the handles are allocated and before Execute.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
} // anonymous namespace
7437
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007438LayerTestResult<uint8_t, 4> SubtractionUint8Test(
7439 armnn::IWorkloadFactory& workloadFactory,
7440 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007441{
7442 const unsigned int shape0[] = { 1, 1, 2, 2 };
7443 const unsigned int shape1[] = { 1, 1, 2, 2 };
7444
7445 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7446 std::vector<uint8_t> input1({ 1, 2, 1, 2 });
7447 std::vector<uint8_t> output({ 3, 3, 5, 5 });
7448
Sadik Armagan2999a022019-04-09 14:20:12 +01007449 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7450 memoryManager,
7451 shape0, input0, 0.5f, 2,
7452 shape1, input1, 1.0f, 0,
7453 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007454}
7455
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007456LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
7457 armnn::IWorkloadFactory& workloadFactory,
7458 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007459{
7460 const unsigned int shape0[] = { 1, 1, 2, 2 };
7461 const unsigned int shape1[] = { 1, 1, 1, 1 };
7462
7463 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7464 std::vector<uint8_t> input1({ 2 });
7465 std::vector<uint8_t> output({ 5, 6, 7, 8 });
7466
Sadik Armagan2999a022019-04-09 14:20:12 +01007467 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7468 memoryManager,
7469 shape0, input0, 0.5f, 2,
7470 shape1, input1, 1.0f, 0,
7471 shape0, output, 1.0f, 3);
David Beckf195f032018-09-06 16:46:34 +01007472}
7473
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007474LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
7475 armnn::IWorkloadFactory& workloadFactory,
7476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007477{
7478 const unsigned int shape0[] = { 1, 1, 2, 2 };
7479 const unsigned int shape1[] = { 1, 1, 2, 1 };
7480
7481 std::vector<uint8_t> input0({ 10, 12, 14, 16 });
7482 std::vector<uint8_t> input1({ 2, 1 });
7483 std::vector<uint8_t> output({ 8, 11, 12, 15 });
7484
Sadik Armagan2999a022019-04-09 14:20:12 +01007485 return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
7486 memoryManager,
7487 shape0, input0, 1.0f, 0,
7488 shape1, input1, 1.0f, 0,
7489 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007490}
7491
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007492LayerTestResult<float, 4> SubtractionTest(
7493 armnn::IWorkloadFactory& workloadFactory,
7494 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007495{
7496 const unsigned int shape0[] = { 1, 1, 2, 2 };
7497 const unsigned int shape1[] = { 1, 1, 2, 2 };
7498
7499 std::vector<float> input0({ 1, 2, 3, 4 });
7500 std::vector<float> input1({ 1, -1, 0, 2 });
7501 std::vector<float> output({ 0, 3, 3, 2 });
7502
Sadik Armagan2999a022019-04-09 14:20:12 +01007503 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7504 memoryManager,
7505 shape0, input0, 1.0f, 0,
7506 shape1, input1, 1.0f, 0,
7507 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007508}
7509
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007510LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
7511 armnn::IWorkloadFactory& workloadFactory,
7512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007513{
7514 const unsigned int shape0[] = { 1, 1, 2, 2 };
7515 const unsigned int shape1[] = { 1, 1, 1, 1 };
7516
7517 std::vector<float> input0({ 1, 2, 3, 4 });
7518 std::vector<float> input1({ 10 });
7519 std::vector<float> output({ -9, -8, -7, -6 });
7520
Sadik Armagan2999a022019-04-09 14:20:12 +01007521 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7522 memoryManager,
7523 shape0, input0, 1.0f, 0,
7524 shape1, input1, 1.0f, 0,
7525 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007526}
7527
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007528LayerTestResult<float, 4> SubtractionBroadcastTest(
7529 armnn::IWorkloadFactory& workloadFactory,
7530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
David Beckf195f032018-09-06 16:46:34 +01007531{
7532 const unsigned int shape0[] = { 1, 1, 2, 2 };
7533 const unsigned int shape1[] = { 1, 1, 1, 2 };
7534
7535 std::vector<float> input0({ 1, 2, 3, 4 });
7536 std::vector<float> input1({ 10, -5 });
7537 std::vector<float> output({ -9, 7, -7, 9 });
7538
Sadik Armagan2999a022019-04-09 14:20:12 +01007539 return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
7540 memoryManager,
7541 shape0, input0, 1.0f, 0,
7542 shape1, input1, 1.0f, 0,
7543 shape0, output, 1.0f, 0);
7544}
7545
7546LayerTestResult<int16_t, 4> SubtractionInt16Test(
7547 armnn::IWorkloadFactory& workloadFactory,
7548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7549{
7550 const unsigned int shape0[] = { 1, 1, 2, 2 };
7551 const unsigned int shape1[] = { 1, 1, 2, 2 };
7552
7553 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7554 std::vector<int16_t> input1({ 1, 2, 1, 2 });
7555 std::vector<int16_t> output({ 3, 3, 5, 5 });
7556
7557 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7558 memoryManager,
7559 shape0, input0, 0.5f, 0,
7560 shape1, input1, 1.0f, 0,
7561 shape0, output, 1.0f, 0);
7562}
7563
7564LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
7565 armnn::IWorkloadFactory& workloadFactory,
7566 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7567{
7568 const unsigned int shape0[] = { 1, 1, 2, 2 };
7569 const unsigned int shape1[] = { 1, 1, 1, 1 };
7570
7571 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7572 std::vector<int16_t> input1({ 2 });
7573 std::vector<int16_t> output({ 3, 4, 5, 6 });
7574
7575 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7576 memoryManager,
7577 shape0, input0, 0.5f, 0,
7578 shape1, input1, 1.0f, 0,
7579 shape0, output, 1.0f, 0);
7580}
7581
7582LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
7583 armnn::IWorkloadFactory& workloadFactory,
7584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7585{
7586 const unsigned int shape0[] = { 1, 1, 2, 2 };
7587 const unsigned int shape1[] = { 1, 1, 2, 1 };
7588
7589 std::vector<int16_t> input0({ 10, 12, 14, 16 });
7590 std::vector<int16_t> input1({ 2, 1 });
7591 std::vector<int16_t> output({ 8, 11, 12, 15 });
7592
7593 return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
7594 memoryManager,
7595 shape0, input0, 1.0f, 0,
7596 shape1, input1, 1.0f, 0,
7597 shape0, output, 1.0f, 0);
David Beckf195f032018-09-06 16:46:34 +01007598}
7599
LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // "Nop" case: output extents equal input extents, so resize-bilinear must
    // pass the input through unchanged (outputExpected is set to the input).
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // NCHW tensors; identical quantization on input and output means quantized
    // values are copied through without rescaling.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = input;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7654
LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Downscales a 2x2 quantized input to a 1x1 output.
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same scale/offset on both sides, so no requantization is involved.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.1567f);
    inputTensorInfo.SetQuantizationOffset(1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.1567f);
    outputTensorInfo.SetQuantizationOffset(1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 255,
        200, 250
    }));

    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
    // the centre).
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7714
LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Square 2x minification: 4x4 input down to 2x2 output.
    constexpr unsigned int inputWidth = 4;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = inputWidth / 2;
    constexpr unsigned int outputHeight = inputHeight / 2;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same scale/offset on both sides, so no requantization is involved.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(3.141592f);
    inputTensorInfo.SetQuantizationOffset(3);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(3.141592f);
    outputTensorInfo.SetQuantizationOffset(3);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, 4,
        2, 3, 4, 5,
        3, 4, 5, 6,
        4, 5, 6, 7
    }));

    // Top-left corner projection: each output texel samples the input at even
    // (row, column) positions, i.e. values 1, 3, 3, 5.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3,
        3, 5
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7772
LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Non-square minification: 3x2 input down to 2x1 output.
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 2;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 2;
    constexpr unsigned int outputHeight = 1;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Same scale/offset on both sides; inline comments below show dequantized
    // values, i.e. 1.5 * (q - (-1)).
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(1.5f);
    inputTensorInfo.SetQuantizationOffset(-1);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(1.5f);
    outputTensorInfo.SetQuantizationOffset(-1);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        1, 2, 3, // 3.0, 4.5, 6.0
        5, 8, 13 // 9.0, 13.5, 21.0
    }));

    // Top-left corner projection with a 1.5x horizontal step: the second output
    // texel interpolates between columns 1 and 2 of the top row.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        1, 3 // 3.0, 5.25
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7828
LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Horizontal magnification: 2-wide input stretched to 5-wide output.
    constexpr unsigned int inputWidth = 2;
    constexpr unsigned int inputHeight = 3;
    constexpr unsigned int inputChannels = 1;
    constexpr unsigned int inputBatchSize = 1;

    constexpr unsigned int outputWidth = 5;
    constexpr unsigned int outputHeight = 3;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    // Input and output deliberately use DIFFERENT quantization parameters, so
    // this test also exercises requantization of the interpolated values.
    // Inline comments show the dequantized (real) values.
    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      armnn::DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationScale(0.010765f);
    inputTensorInfo.SetQuantizationOffset(7);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       armnn::DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationScale(0.010132f);
    outputTensorInfo.SetQuantizationOffset(-18);

    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
        24, 228, // 0.183005, 2.379065,
        105, 128, // 1.05497, 1.302565
        230, 71 // 2.400595, 0.68896
    }));

    // Each output row interpolates between its row's two input values at
    // x = 0, 0.4, 0.8, then clamps at the right edge.
    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
        0, 87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
        86, 96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
        219, 151, 84, 50, 50 // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
    }));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ResizeBilinearQueueDescriptor descriptor;
    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
    return result;
}
7886
LayerTestResult<float, 2> Rsqrt2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    std::vector<float> inputValues,
    std::vector<float> expectedOutputValues)
{
    // Shared driver for the 2-D Rsqrt (1/sqrt(x)) tests: builds an Rsqrt
    // workload for the given backend, feeds it inputValues and returns the
    // actual output alongside expectedOutputValues for comparison.
    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 2> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());

    return result;
}
7924LayerTestResult<float, 2> Rsqrt2dTest(
7925 armnn::IWorkloadFactory& workloadFactory,
7926 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
7927{
7928 const armnn::TensorShape inputShape{ 2, 2 };
7929 const armnn::TensorShape outputShape{ 2, 2 };
7930
7931 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
7932 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
7933
7934 std::vector<float> inputValues
7935 {
7936 1.f, 4.f,
7937 16.f, 25.f
7938 };
7939
7940 std::vector<float> expectedOutputValues
7941 {
7942 1.f, 0.5f,
7943 0.25f, 0.2f
7944 };
7945
7946 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
7947 inputTensorInfo, outputTensorInfo,
7948 inputValues, expectedOutputValues);
7949}
7950
LayerTestResult<float, 3> Rsqrt3dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // 3-D Rsqrt (1/sqrt(x)) on a 3x1x2 tensor; perfect-square inputs keep the
    // expected outputs exact. Mirrors Rsqrt2dTestCommon but for rank 3.
    const armnn::TensorShape inputShape{ 3, 1, 2 };
    const armnn::TensorShape outputShape{ 3, 1, 2 };

    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues
    {
        1.f, 4.f, 16.f,
        25.f, 64.f, 100.f
    };

    std::vector<float> expectedOutputValues
    {
        1.f, 0.5f, 0.25f,
        0.2f, 0.125f, 0.1f
    };

    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));

    LayerTestResult<float, 3> result(outputTensorInfo);
    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::RsqrtQueueDescriptor descriptor;

    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);

    // Must run after Allocate() and before Execute() on all backends.
    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());

    return result;
}
8002
8003LayerTestResult<float, 2> RsqrtZeroTest(
8004 armnn::IWorkloadFactory& workloadFactory,
8005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8006{
8007 const armnn::TensorShape inputShape{ 1, 2 };
8008 const armnn::TensorShape outputShape{ 1, 2 };
8009
8010 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
8011 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
8012
8013 std::vector<float> inputValues
8014 {
8015 0.f, -0.f
8016 };
8017
8018 std::vector<float> expectedOutputValues
8019 {
8020 INFINITY, -INFINITY
8021 };
8022
8023 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
8024 inputTensorInfo, outputTensorInfo,
8025 inputValues, expectedOutputValues);
8026}
8027
8028LayerTestResult<float, 2> RsqrtNegativeTest(
8029 armnn::IWorkloadFactory& workloadFactory,
8030 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8031{
8032 const armnn::TensorShape inputShape{ 1, 2 };
8033 const armnn::TensorShape outputShape{ 1, 2 };
8034
8035 const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
8036 const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
8037
8038 std::vector<float> inputValues
8039 {
8040 -25.f, -16.f
8041 };
8042
8043 std::vector<float> expectedOutputValues
8044 {
8045 -NAN, -NAN
8046 };
8047
8048 return Rsqrt2dTestCommon(workloadFactory, memoryManager,
8049 inputTensorInfo, outputTensorInfo,
8050 inputValues, expectedOutputValues);
8051}
8052
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008053LayerTestResult<float, 4> BatchNormTest(
8054 armnn::IWorkloadFactory& workloadFactory,
8055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008056{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008057 // BatchSize: 1
8058 // Channels: 2
8059 // Height: 3
8060 // Width: 2
8061
8062 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8063 std::vector<float> inputValues
8064 {
8065 // Batch 0, Channel 0, Height (3) x Width (2)
8066 1.f, 4.f,
8067 4.f, 2.f,
8068 1.f, 6.f,
8069
8070 // Batch 0, Channel 1, Height (3) x Width (2)
8071 1.f, 1.f,
8072 4.f, 1.f,
8073 -2.f, 4.f
8074 };
8075 std::vector<float> expectedOutputValues
8076 {
8077 // Batch 0, Channel 0, Height (3) x Width (2)
8078 1.f, 4.f,
8079 4.f, 2.f,
8080 1.f, 6.f,
8081
8082 // Batch 0, Channel 1, Height (3) x Width (2)
8083 3.f, 3.f,
8084 4.f, 3.f,
8085 2.f, 4.f
8086 };
8087
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008088 return BatchNormTestImpl<armnn::DataType::Float32>(
8089 workloadFactory, memoryManager,
8090 inputOutputShape, inputValues, expectedOutputValues,
8091 0.f, 0, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008092}
8093
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008094LayerTestResult<float, 4> BatchNormNhwcTest(
8095 armnn::IWorkloadFactory& workloadFactory,
8096 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008097{
8098 // BatchSize: 1
8099 // Height: 3
8100 // Width: 2
8101 // Channels: 2
8102
8103 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8104 std::vector<float> inputValues
8105 {
8106 // Batch 0, Height 0, Width (2) x Channel (2)
8107 1.f, 1.f,
8108 4.f, 1.f,
8109
8110 // Batch 0, Height 1, Width (2) x Channel (2)
8111 4.f, 4.f,
8112 2.f, 1.f,
8113
8114 // Batch 0, Height 2, Width (2) x Channel (2)
8115 1.f, -2.f,
8116 6.f, 4.f
8117 };
8118 std::vector<float> expectedOutputValues
8119 {
8120 // Batch 0, Height 0, Width (2) x Channel (2)
8121 1.f, 3.f,
8122 4.f, 3.f,
8123
8124 // Batch 0, Height 1, Width (2) x Channel (2)
8125 4.f, 4.f,
8126 2.f, 3.f,
8127
8128 // Batch 0, Height 2, Width (2) x Channel (2)
8129 1.f, 2.f,
8130 6.f, 4.f
8131 };
8132
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008133 return BatchNormTestImpl<armnn::DataType::Float32>(
8134 workloadFactory, memoryManager,
8135 inputOutputShape, inputValues, expectedOutputValues,
8136 0.f, 0, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008137}
8138
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008139LayerTestResult<uint8_t, 4> BatchNormUint8Test(
8140 armnn::IWorkloadFactory& workloadFactory,
8141 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008142{
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008143 // BatchSize: 1
8144 // Channels: 2
8145 // Height: 3
8146 // Width: 2
8147
8148 const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
8149 std::vector<float> inputValues
8150 {
8151 // Batch 0, Channel 0, Height (3) x Width (2)
8152 1.f, 4.f,
8153 4.f, 2.f,
8154 1.f, 6.f,
8155
8156 // Batch 0, Channel 1, Height (3) x Width (2)
8157 1.f, 1.f,
8158 4.f, 1.f,
8159 -2.f, 4.f
8160 };
8161 std::vector<float> expectedOutputValues
8162 {
8163 // Batch 0, Channel 0, Height (3) x Width (2)
8164 1.f, 4.f,
8165 4.f, 2.f,
8166 1.f, 6.f,
8167
8168 // Batch 0, Channel 1, Height (3) x Width (2)
8169 3.f, 3.f,
8170 4.f, 3.f,
8171 2.f, 4.f
8172 };
8173
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008174 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
8175 workloadFactory, memoryManager,
8176 inputOutputShape, inputValues, expectedOutputValues,
8177 1.f/20.f, 50, armnn::DataLayout::NCHW);
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008178}
8179
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008180LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
8181 armnn::IWorkloadFactory& workloadFactory,
8182 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh8eb675e2018-10-17 14:43:29 +01008183{
8184 // BatchSize: 1
8185 // Height: 3
8186 // Width: 2
8187 // Channels: 2
8188
8189 const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
8190 std::vector<float> inputValues
8191 {
8192 // Batch 0, Height 0, Width (2) x Channel (2)
8193 1.f, 1.f,
8194 4.f, 1.f,
8195
8196 // Batch 0, Height 1, Width (2) x Channel (2)
8197 4.f, 4.f,
8198 2.f, 1.f,
8199
8200 // Batch 0, Height 2, Width (2) x Channel (2)
8201 1.f, -2.f,
8202 6.f, 4.f
8203 };
8204 std::vector<float> expectedOutputValues
8205 {
8206 // Batch 0, Height 0, Width (2) x Channel (2)
8207 1.f, 3.f,
8208 4.f, 3.f,
8209
8210 // Batch 0, Height 1, Width (2) x Channel (2)
8211 4.f, 4.f,
8212 2.f, 3.f,
8213
8214 // Batch 0, Height 2, Width (2) x Channel (2)
8215 1.f, 2.f,
8216 6.f, 4.f
8217 };
8218
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008219 return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>
8220 (workloadFactory, memoryManager,
8221 inputOutputShape, inputValues, expectedOutputValues,
8222 1.f/20.f, 50, armnn::DataLayout::NHWC);
telsoa014fcda012018-03-09 14:13:49 +00008223}
8224
Nina Drozd58ef2c62019-05-16 12:09:18 +01008225LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008226 armnn::IWorkloadFactory& workloadFactory,
8227 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008228{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008229 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
telsoa014fcda012018-03-09 14:13:49 +00008230}
8231
Nina Drozd58ef2c62019-05-16 12:09:18 +01008232LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
8233 armnn::IWorkloadFactory& workloadFactory,
8234 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8235{
8236 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);
8237}
8238
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008239LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
8240 armnn::IWorkloadFactory& workloadFactory,
8241 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008242{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008243 return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008244}
8245
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008246LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
8247 armnn::IWorkloadFactory& workloadFactory,
8248 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008249{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008250 return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008251}
8252
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008253LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
8254 armnn::IWorkloadFactory& workloadFactory,
8255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008256{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008257 return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008258}
8259
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008260LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
8261 armnn::IWorkloadFactory& workloadFactory,
8262 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008263{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008264 return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8265 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008266}
8267
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008268LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
8269 armnn::IWorkloadFactory& workloadFactory,
8270 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008271{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008272 return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8273 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008274}
8275
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008276LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
8277 armnn::IWorkloadFactory& workloadFactory,
8278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008279{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008280 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008281}
8282
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008283LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
8284 armnn::IWorkloadFactory& workloadFactory,
8285 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008286{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008287 return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008288}
8289
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008290LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
8291 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008292 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8293 bool useSubtensor)
telsoa014fcda012018-03-09 14:13:49 +00008294{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008295 return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8296 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008297}
8298
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008299LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
8300 armnn::IWorkloadFactory& workloadFactory,
8301 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008302{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008303 return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008304}
8305
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008306LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
8307 armnn::IWorkloadFactory& workloadFactory,
8308 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008309{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008310 return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8311 workloadFactory, memoryManager, 0.5f, -1);
telsoa014fcda012018-03-09 14:13:49 +00008312}
8313
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008314LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
8315 armnn::IWorkloadFactory& workloadFactory,
narpra015cdda352018-11-19 15:30:27 +00008316 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8317 bool useSubtensor)
8318{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008319 return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
8320 workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008321}
8322
8323LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
8324 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008325 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008326{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008327 return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008328}
8329
8330LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
8331 armnn::IWorkloadFactory& workloadFactory,
8332 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8333{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008334 return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008335}
8336
8337LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
8338 armnn::IWorkloadFactory& workloadFactory,
8339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8340{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008341 return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008342}
8343
8344LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
8345 armnn::IWorkloadFactory& workloadFactory,
8346 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
8347{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008348 return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8349 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
narpra015cdda352018-11-19 15:30:27 +00008350}
8351
8352LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
8353 armnn::IWorkloadFactory& workloadFactory,
8354 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8355{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008356 return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
8357 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008358}
8359
8360LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
8361 armnn::IWorkloadFactory& workloadFactory,
8362 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8363{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008364 return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
8365 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008366}
8367
8368LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
8369 armnn::IWorkloadFactory& workloadFactory,
8370 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
8371{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008372 return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
8373 workloadFactory, memoryManager, 0.5f, -1);
narpra015cdda352018-11-19 15:30:27 +00008374}
8375
8376LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
8377 armnn::IWorkloadFactory& workloadFactory,
8378 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8379 bool useSubtensor)
8380{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008381 return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
8382 workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
telsoa014fcda012018-03-09 14:13:49 +00008383}
8384
// Float32 max pooling, 2x2 kernel / 2x2 stride; forceNoPadding toggles padding
// behaviour in the shared templated impl.
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8393
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008394LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
8395 armnn::IWorkloadFactory& workloadFactory,
8396 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8397 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008398{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008399 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008400 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008401}
8402
// Float32 max pooling, 3x3 kernel / 2x4 stride; forwards to the shared
// templated impl.
LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8411
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008412LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
8413 armnn::IWorkloadFactory& workloadFactory,
8414 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8415 bool forceNoPadding)
telsoa014fcda012018-03-09 14:13:49 +00008416{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008417 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008418 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008419}
8420
// Float32 simple max pooling parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8428
// Uint8 simple max pooling parameterised on data layout; uses the impl's
// default quantisation parameters.
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
8436
// Float32 simple average pooling parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8444
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008445LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
8446 armnn::IWorkloadFactory& workloadFactory,
8447 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +00008448 const armnn::DataLayout dataLayout)
James Conroy69482272018-10-19 10:41:35 +01008449{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008450 return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008451 workloadFactory, memoryManager, dataLayout, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008452}
8453
// Float32 average pooling, 3x2 kernel / 2x2 stride, padding-ignoring variant;
// forceNoPadding toggles padding in the shared impl.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}
8462
// Float32 average pooling over large tensors; forwards to the shared impl.
LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8469
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008470LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
8471 armnn::IWorkloadFactory& workloadFactory,
8472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008473{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008474 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8475 workloadFactory, memoryManager, 0.5, -1);
telsoa014fcda012018-03-09 14:13:49 +00008476}
8477
// Float32 simple L2 pooling parameterised on data layout (NCHW/NHWC).
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
8485
// Uint8 simple L2 pooling parameterised on data layout; uses the impl's
// default quantisation parameters.
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
8493
// Float32 L2 pooling, 3x3 kernel / stride 1; forwards to the shared impl.
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8500
// Uint8 L2 pooling, 3x3 kernel / stride 1; forwards to the shared impl.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8507
// Float32 L2 pooling, 3x3 kernel / stride 3; forwards to the shared impl.
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8514
// Uint8 L2 pooling, 3x3 kernel / stride 3; forwards to the shared impl.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8521
// Float32 L2 pooling, 3x3 kernel / stride 4; forwards to the shared impl.
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8528
// Uint8 L2 pooling, 3x3 kernel / stride 4; forwards to the shared impl.
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8535
// Float32 L2 pooling with a 7x7 kernel; forwards to the shared impl.
LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8542
// Uint8 L2 pooling with a 7x7 kernel; forwards to the shared impl.
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8549
// Float32 L2 pooling with a 9x9 kernel; forwards to the shared impl.
LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8556
// Uint8 L2 pooling with a 9x9 kernel; forwards to the shared impl.
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8563
// Float32 pooling with asymmetric, non-square parameters; forwards to the
// shared impl.
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8570
// Uint8 pooling with asymmetric, non-square parameters; forwards to the
// shared impl.
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8577
// Float32 comparison test: runs the given pooling type on workloadFactory and
// refWorkloadFactory and compares results via the shared impl.
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
8587
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008588LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
8589 armnn::IWorkloadFactory& workloadFactory,
8590 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
8591 armnn::IWorkloadFactory& refWorkloadFactory,
8592 armnn::PoolingAlgorithm poolingType)
telsoa014fcda012018-03-09 14:13:49 +00008593{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008594 return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008595 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
telsoa014fcda012018-03-09 14:13:49 +00008596}
8597
// Float32 fully-connected layer with large tensors; transposeWeights selects
// the transposed-weights path in the shared impl.
LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
8605
// Float32 max pooling with IgnoreValue padding; forwards to the shared impl.
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8612
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008613LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
8614 armnn::IWorkloadFactory& workloadFactory,
8615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008616{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008617 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
8618 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008619}
8620
// Float32 max pooling, 3x3 kernel, IgnoreValue padding; forwards to the
// shared impl.
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8627
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008628LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
8629 armnn::IWorkloadFactory& workloadFactory,
8630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008631{
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00008632 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
8633 workloadFactory, memoryManager, 1.0f, -5);
telsoa014fcda012018-03-09 14:13:49 +00008634}
8635
// Float32 average pooling with IgnoreValue padding; forwards to the shared impl.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8642
// Uint8 average pooling with IgnoreValue padding; uses the impl's default
// quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8650
// Float32 average pooling, IgnoreValue padding mode but zero padding applied;
// forwards to the shared impl.
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}
8658
// Uint8 average pooling, IgnoreValue padding mode but zero padding applied;
// forwards to the shared impl.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8666
// Float32 average pooling, 3x3 kernel, IgnoreValue padding; forwards to the
// shared impl.
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8673
// Uint8 average pooling, 3x3 kernel, IgnoreValue padding; uses the impl's
// default quantisation parameters.
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
8681
// Float32 L2 pooling with IgnoreValue padding; forwards to the shared impl.
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8688
// Uint8 L2 pooling with IgnoreValue padding; forwards to the shared impl.
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8695
// Float32 L2 pooling, 3x3 kernel, IgnoreValue padding; forwards to the
// shared impl.
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
8702
// Uint8 L2 pooling, 3x3 kernel, IgnoreValue padding; forwards to the
// shared impl.
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
8709
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008710LayerTestResult<float, 4> SimplePermuteFloat32Test(
8711 armnn::IWorkloadFactory& workloadFactory,
8712 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008713{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008714 return SimplePermuteFloat32TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008715};
8716
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008717LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(
8718 armnn::IWorkloadFactory& workloadFactory,
8719 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
telsoa014fcda012018-03-09 14:13:49 +00008720{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008721 return SimplePermuteUint8TestCommon(workloadFactory, memoryManager);
telsoa014fcda012018-03-09 14:13:49 +00008722};
surmeh01bceff2f2018-03-29 16:29:27 +01008723
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008724LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(
8725 armnn::IWorkloadFactory& workloadFactory,
8726 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008727{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008728 return PermuteFloat32ValueSet1TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008729};
8730
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008731LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(
8732 armnn::IWorkloadFactory& workloadFactory,
8733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008734{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008735 return PermuteFloat32ValueSet2TestCommon(workloadFactory, memoryManager);
surmeh01bceff2f2018-03-29 16:29:27 +01008736};
8737
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008738LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(
8739 armnn::IWorkloadFactory& workloadFactory,
8740 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
surmeh01bceff2f2018-03-29 16:29:27 +01008741{
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008742 return PermuteFloat32ValueSet3TestCommon(workloadFactory, memoryManager);
narpra011e4c31d2018-09-28 11:07:51 +01008743};
8744
namespace
{

// Shared driver for the Mean-layer tests below.
//
// Builds quantization-aware TensorInfos from the raw shape arrays, creates a
// Mean workload via the supplied factory, feeds it inputData, and returns a
// LayerTestResult holding both the actual and the expected output for the
// caller to compare.
//
// T            - element type; uint8_t selects QuantisedAsymm8, any other type Float32.
// InputDim     - rank of the input tensor.
// OutputDim    - rank of the output tensor (smaller than InputDim when keepDims is false).
// axis         - dimensions to reduce over, forwarded untouched to MeanQueueDescriptor;
//                callers pass an empty vector to reduce over every dimension.
// scale/offset - quantization parameters applied to BOTH the input and output infos.
template <typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> MeanTestHelper(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const unsigned int* inputShape,
    const std::vector<T>& inputData,
    const std::vector<unsigned int>& axis,
    bool keepDims,
    const unsigned int* outputShape,
    const std::vector<T>& outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is inferred from T: only uint8_t and float are used by the callers in this file.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    // Same quantization on both sides; for Float32 runs these are harmless defaults.
    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MeanQueueDescriptor data;
    data.m_Parameters.m_Axis = axis;
    data.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);

    // Order matters: allocate handles, stage the input, configure, then execute.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());

    return result;
}

} // anonymous namespace
8803
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008804LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(
8805 armnn::IWorkloadFactory& workloadFactory,
8806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008807{
8808 const unsigned int inputShape[] = { 3, 2 };
8809 const unsigned int outputShape[] = { 1 };
8810
8811 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8812 std::vector<uint8_t> output({ 2 });
8813
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008814 return MeanTestHelper<uint8_t, 2, 1>(
8815 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008816}
8817
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008818LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(
8819 armnn::IWorkloadFactory& workloadFactory,
8820 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008821{
8822 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8823 const unsigned int outputShape[] = { 1, 1, 2 };
8824
8825 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8826 std::vector<uint8_t> output({ 2, 2 });
8827
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008828 return MeanTestHelper<uint8_t, 4, 3>(
8829 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008830}
8831
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008832LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(
8833 armnn::IWorkloadFactory& workloadFactory,
8834 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008835{
8836 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8837 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8838
8839 std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
8840 std::vector<uint8_t> output({ 2, 2 });
8841
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008842 return MeanTestHelper<uint8_t, 4, 4>(
8843 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008844}
8845
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008846LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(
8847 armnn::IWorkloadFactory& workloadFactory,
8848 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008849{
8850 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8851 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8852
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008853 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
narpra011e4c31d2018-09-28 11:07:51 +01008854 std::vector<uint8_t> output({ 1, 3, 5 });
8855
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008856 return MeanTestHelper<uint8_t, 4, 4>(
8857 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008858}
8859
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008860LayerTestResult<uint8_t, 1> MeanVtsUint8Test(
8861 armnn::IWorkloadFactory& workloadFactory,
8862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008863{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008864 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008865 const unsigned int outputShape[] = { 2 };
8866
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008867 std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
8868 24 });
8869 std::vector<uint8_t> output({ 12, 13 });
narpra011e4c31d2018-09-28 11:07:51 +01008870
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008871 return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, memoryManager,
8872 inputShape, input, { 0, 1 }, false, outputShape,
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008873 output, 0.8f, 5);
narpra011e4c31d2018-09-28 11:07:51 +01008874}
8875
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008876LayerTestResult<float, 1> MeanFloatSimpleTest(
8877 armnn::IWorkloadFactory& workloadFactory,
8878 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008879{
8880 const unsigned int inputShape[] = { 3, 2 };
8881 const unsigned int outputShape[] = { 1 };
8882
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008883 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8884 std::vector<float> output({ 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008885
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008886 return MeanTestHelper<float, 2, 1>(
8887 workloadFactory, memoryManager, inputShape, input, {}, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008888}
8889
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008890LayerTestResult<float, 3> MeanFloatSimpleAxisTest(
8891 armnn::IWorkloadFactory& workloadFactory,
8892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008893{
8894 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8895 const unsigned int outputShape[] = { 3, 1, 2 };
8896
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008897 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8898 std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008899
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008900 return MeanTestHelper<float, 4, 3>(
8901 workloadFactory, memoryManager, inputShape, input, { 0 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008902}
8903
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008904LayerTestResult<float, 4> MeanFloatKeepDimsTest(
8905 armnn::IWorkloadFactory& workloadFactory,
8906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008907{
8908 const unsigned int inputShape[] = { 1, 1, 3, 2 };
8909 const unsigned int outputShape[] = { 1, 1, 1, 2 };
8910
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008911 std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
8912 std::vector<float> output({ 2.0f, 2.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008913
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008914 return MeanTestHelper<float, 4, 4>(
8915 workloadFactory, memoryManager, inputShape, input, { 2 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008916}
8917
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008918LayerTestResult<float, 4> MeanFloatMultipleDimsTest(
8919 armnn::IWorkloadFactory& workloadFactory,
8920 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008921{
8922 const unsigned int inputShape[] = { 2, 3, 1, 2 };
8923 const unsigned int outputShape[] = { 1, 3, 1, 1 };
8924
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008925 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
8926 std::vector<float> output({ 1.5f, 3.5f, 5.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008927
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008928 return MeanTestHelper<float, 4, 4>(
8929 workloadFactory, memoryManager, inputShape, input, { 0, 3 }, true, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008930}
8931
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008932LayerTestResult<float, 1> MeanVtsFloat1Test(
8933 armnn::IWorkloadFactory& workloadFactory,
8934 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008935{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008936 const unsigned int inputShape[] = { 4, 3, 2 };
narpra011e4c31d2018-09-28 11:07:51 +01008937 const unsigned int outputShape[] = { 2 };
8938
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008939 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8940 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8941 std::vector<float> output({ 12.0f, 13.0f });
narpra011e4c31d2018-09-28 11:07:51 +01008942
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008943 return MeanTestHelper<float, 3, 1>(
8944 workloadFactory, memoryManager, inputShape, input, { 0, 1 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008945}
8946
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008947LayerTestResult<float, 3> MeanVtsFloat2Test(
8948 armnn::IWorkloadFactory& workloadFactory,
8949 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
narpra011e4c31d2018-09-28 11:07:51 +01008950{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008951 const unsigned int inputShape[] = { 4, 3, 2 };
8952 const unsigned int outputShape[] = { 1, 3, 1 };
narpra011e4c31d2018-09-28 11:07:51 +01008953
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008954 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
8955 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
8956 std::vector<float> output({ 10.5f, 12.5f, 14.5f });
narpra011e4c31d2018-09-28 11:07:51 +01008957
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008958 return MeanTestHelper<float, 3, 3>(
8959 workloadFactory, memoryManager, inputShape, input, { 0, 2 }, true, outputShape, output);
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008960}
8961
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008962LayerTestResult<float, 3> MeanVtsFloat3Test(
8963 armnn::IWorkloadFactory& workloadFactory,
8964 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matteo Martincigh28dcab62018-10-19 16:40:03 +01008965{
8966 const unsigned int inputShape[] = { 1, 2, 2, 1 };
8967 const unsigned int outputShape[] = { 1, 2, 1 };
8968
8969 std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
8970 std::vector<float> output({ 1.5f, 3.5f });
8971
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00008972 return MeanTestHelper<float, 4, 3>(
8973 workloadFactory, memoryManager, inputShape, input, { 2 }, false, outputShape, output);
narpra011e4c31d2018-09-28 11:07:51 +01008974}
Éanna Ó Catháin47c1ddb2018-10-12 14:24:13 +01008975
// Chains two workloads: a 1x1 MaxPool with stride 2 over a 3x3 input, whose
// output handle is then fed as one operand of an Addition workload. Verifies
// that the second workload correctly consumes the first one's output handle.
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Create Initial Tensor
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
                                                                    {1, 2, 3,
                                                                     4, 5, 6,
                                                                     7, 8, 9
                                                                    });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool poolSize = 1x1, stride=2x2
    // Result =
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);


    // Create addition with another tensor the same size
    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
    // with the initial tensor.
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
                                                                {12, 16,
                                                                 24, 28,
                                                                });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float,4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
            {
                    13, 19,
                    31, 37
            }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    // NOTE(review): the next two copies round-trip poolingOutputHandle through
    // resultMaxPool BEFORE the MaxPool workload has executed, so they move data
    // that has not been computed yet. The MaxPool Execute() below overwrites
    // poolingOutputHandle anyway, so the final result is unaffected — this
    // round-trip looks redundant; confirm before removing.
    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    // Execute MaxPool first so its output handle holds valid data, then the Addition.
    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}
Nattapat Chaimanowong3ea76d52018-11-09 14:10:38 +00009080
// SpaceToBatchNd, simple case, Float32; delegates to the shared templated implementation.
LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9087
// SpaceToBatchNd with multiple channels, Float32; delegates to the shared templated implementation.
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9094
// SpaceToBatchNd with a multi-element block shape, Float32; delegates to the shared implementation.
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9101
// SpaceToBatchNd with padding, Float32; delegates to the shared templated implementation.
LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9108
// SpaceToBatchNd, simple case, QAsymm8; delegates to the shared templated implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9115
// SpaceToBatchNd with multiple channels, QAsymm8; delegates to the shared templated implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9122
// SpaceToBatchNd with a multi-element block shape, QAsymm8; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9129
// SpaceToBatchNd with padding, QAsymm8; delegates to the shared templated implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9136
// SpaceToBatchNd, simple case, Float32, NHWC layout; delegates to the shared implementation.
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9143
// SpaceToBatchNd with multiple channels, Float32, NHWC layout; delegates to the shared implementation.
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9150
// SpaceToBatchNd with a multi-element block shape, Float32, NHWC layout; delegates to the shared implementation.
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9157
// SpaceToBatchNd with padding, Float32, NHWC layout; delegates to the shared implementation.
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9164
// SpaceToBatchNd, simple case, QAsymm8, NHWC layout; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9171
// SpaceToBatchNd with multiple channels, QAsymm8, NHWC layout; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9178
// SpaceToBatchNd with a multi-element block shape, QAsymm8, NHWC layout; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9185
// SpaceToBatchNd with padding, QAsymm8, NHWC layout; delegates to the shared implementation.
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009192
namespace {

// Shared driver for the BatchToSpaceNd tests below.
//
// Builds input/output TensorInfos, runs a BatchToSpaceNd workload created by
// the given factory with the supplied block shape and crops, and returns the
// actual vs expected outputs for the caller to compare.
//
// T            - element type; uint8_t selects QuantisedAsymm8, any other type Float32.
// InputDim     - rank of the input tensor.
// OutputDim    - rank of the output tensor. NOTE: the final CopyDataFromITensorHandle
//                indexes result.output with four subscripts, so this helper assumes
//                OutputDim == 4 (true for every caller in this file).
// scale/offset - quantization parameters applied to BOTH the input and output infos.
template<typename T, std::size_t InputDim, std::size_t OutputDim>
LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout& dataLayout,
    const unsigned int *inputShape,
    const std::vector<T> &inputData,
    const std::vector<unsigned int> &blockShape,
    const std::vector<std::pair<unsigned int, unsigned int>> &crops,
    const unsigned int *outputShape,
    const std::vector<T> &outputData,
    float scale = 1.0f,
    int32_t offset = 0)
{
    // Data type is inferred from T: only uint8_t and float are used by the callers in this file.
    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);

    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);

    inputTensorInfo.SetQuantizationScale(scale);
    inputTensorInfo.SetQuantizationOffset(offset);

    outputTensorInfo.SetQuantizationScale(scale);
    outputTensorInfo.SetQuantizationOffset(offset);

    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);

    LayerTestResult<T, OutputDim> result(outputTensorInfo);
    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchToSpaceNdQueueDescriptor data;
    data.m_Parameters.m_DataLayout = dataLayout;
    data.m_Parameters.m_BlockShape = blockShape;
    data.m_Parameters.m_Crops = crops;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchToSpaceNd(data, info);

    // Order matters: allocate handles, stage the input, configure, then execute.
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.origin());

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

} // anonymous namespace
9252
// BatchToSpaceNd, NHWC: rearranges a 4x2x2x1 input with block shape {2, 2} and
// no crops into a single 1x4x4x1 batch.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 2, 2, 1};
    const unsigned int outputShape[] = {1, 4, 4, 1};

    std::vector<float> input({
        // Batch 0, Height 0, Width (2) x Channel (1)
        1.0f, 3.0f,
        // Batch 0, Height 1, Width (2) x Channel (1)
        9.0f, 11.0f,


        // Batch 1, Height 0, Width (2) x Channel (1)
        2.0f, 4.0f,
        // Batch 1, Height 1, Width (2) x Channel (1)
        10.0f, 12.0f,


        // Batch 2, Height 0, Width (2) x Channel (1)
        5.0f, 7.0f,
        // Batch 2, Height 1, Width (2) x Channel (1)
        13.0f, 15.0f,

        // Batch 3, Height 0, Width (2) x Channel (1)
        6.0f, 8.0f,
        // Batch 3, Height 1, Width (2) x Channel (1)
        14.0f, 16.0f
    });

    std::vector<float> expectedOutput({
        1.0f, 2.0f, 3.0f, 4.0f,
        5.0f, 6.0f, 7.0f, 8.0f,
        9.0f, 10.0f, 11.0f, 12.0f,
        13.0f, 14.0f, 15.0f, 16.0f
    });

    std::vector<unsigned int> blockShape {2, 2};
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9298
// BatchToSpaceNd, NHWC: minimal case — four 1x1x1 batches merged by a {2, 2}
// block shape into a single 1x2x2x1 batch, no crops.
LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int inputShape[] = {4, 1, 1, 1};
    const unsigned int outputShape[] = {1, 2, 2, 1};

    std::vector<float> input({
        // One value per batch (batches 0-3).
        1.0f, 2.0f, 3.0f, 4.0f
    });

    std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});

    std::vector<unsigned int> blockShape({2, 2});
    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};

    return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
                                             armnn::DataLayout::NHWC, inputShape, input, blockShape,
                                             crops, outputShape, expectedOutput);
}
9320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009321LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test3(
9322 armnn::IWorkloadFactory& workloadFactory,
9323 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009324{
9325 const unsigned int inputShape[] = {4, 1, 1, 3};
9326 const unsigned int outputShape[] = {1, 2, 2, 3};
9327
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009328 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009329
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009330 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009331
9332 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009333 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009334
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009335 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9336 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9337 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009338}
9339
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009340LayerTestResult<float, 4> BatchToSpaceNdNhwcFloat32Test4(
9341 armnn::IWorkloadFactory& workloadFactory,
9342 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9343{
9344 const unsigned int inputShape[] = {8, 1, 3, 1};
9345 const unsigned int outputShape[] = {2, 2, 4, 1};
9346
9347 std::vector<float> input({
9348 0.0f, 1.0f, 3.0f,
9349 0.0f, 9.0f, 11.0f,
9350 0.0f, 2.0f, 4.0f,
9351 0.0f, 10.0f, 12.0f,
9352 0.0f, 5.0f, 7.0f,
9353 0.0f, 13.0f, 15.0f,
9354 0.0f, 6.0f, 8.0f,
9355 0.0f, 14.0f, 16.0f
9356 });
9357
9358 std::vector<float> expectedOutput({
9359 1.0f, 2.0f, 3.0f, 4.0f,
9360 5.0f, 6.0f, 7.0f, 8.0f,
9361 9.0f, 10.0f, 11.0f, 12.0f,
9362 13.0f, 14.0f, 15.0f, 16.0f
9363 });
9364
9365 std::vector<unsigned int> blockShape({2, 2});
9366 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9367
9368 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9369 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9370 crops, outputShape, expectedOutput);
9371}
9372
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009373LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test1(
9374 armnn::IWorkloadFactory &workloadFactory,
9375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009376{
9377 const unsigned int inputShape[] = {4, 3, 1, 1};
9378 const unsigned int outputShape[] = {1, 3, 2, 2};
9379
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009380 std::vector<float> input({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f});
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009381
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009382 std::vector<float> expectedOutput({
9383 // Batch 0, Channel 0, Height (2) x Width (2)
9384 1.0f, 4.0f,
9385 7.0f, 10.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009386
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009387 // Batch 0, Channel 1, Height (2) x Width (2)
9388 2.0f, 5.0f,
9389 8.0f, 11.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009390
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009391 // Batch 0, Channel 2, Height (2) x Width (2)
9392 3.0f, 6.0f,
9393 9.0f, 12.0f,
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009394 });
9395
9396 std::vector<unsigned int> blockShape({2, 2});
Éanna Ó Catháin95807ce2018-11-12 17:14:43 +00009397 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009398
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009399 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9400 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9401 crops, outputShape, expectedOutput);
Éanna Ó Catháin4e1e1362018-11-12 11:36:34 +00009402}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009403
Mike Kelly831faed2018-11-28 11:52:08 +00009404LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test2(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009405 armnn::IWorkloadFactory& workloadFactory,
9406 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009407{
9408 const unsigned int inputShape[] = {4, 1, 1, 1};
9409 const unsigned int outputShape[] = {1, 1, 2, 2};
9410
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009411 std::vector<float> input({
9412 // Batch 0, Height 0, Width (2) x Channel (1)
9413 1.0f, 2.0f, 3.0f, 4.0f
9414 });
Mike Kelly831faed2018-11-28 11:52:08 +00009415
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009416 std::vector<float> expectedOutput({1.0f, 2.0f, 3.0f, 4.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009417
9418 std::vector<unsigned int> blockShape({2, 2});
9419 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9420
9421 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9422 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9423 crops, outputShape, expectedOutput);
9424}
9425
9426LayerTestResult<float, 4> BatchToSpaceNdNchwFloat32Test3(
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009427 armnn::IWorkloadFactory& workloadFactory,
9428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Mike Kelly831faed2018-11-28 11:52:08 +00009429{
9430 const unsigned int inputShape[] = {4, 3, 1, 1};
9431 const unsigned int outputShape[] = {1, 3, 2, 2};
9432
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009433 std::vector<float> input({1.0f, 3.0f, 5.0f, 7.0f, 9.0f, 11.0f, 2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f});
Mike Kelly831faed2018-11-28 11:52:08 +00009434
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009435 std::vector<float> expectedOutput({
9436 // Batch 0, Channel 0, Height (2) x Width (2)
9437 1.0f, 7.0f,
9438 2.0f, 8.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009439
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009440 // Batch 0, Channel 1, Height (2) x Width (2)
9441 3.0f, 9.0f,
9442 4.0f, 10.0f,
Mike Kelly831faed2018-11-28 11:52:08 +00009443
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009444 // Batch 0, Channel 2, Height (2) x Width (2)
9445 5.0f, 11.0f,
9446 6.0f, 12.0f,
9447 });
Mike Kelly831faed2018-11-28 11:52:08 +00009448
9449 std::vector<unsigned int> blockShape({2, 2});
9450 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9451
9452 return BatchToSpaceNdHelper<float, 4, 4>(workloadFactory, memoryManager,
9453 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9454 crops, outputShape, expectedOutput);
9455}
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009456
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009457LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest1(
9458 armnn::IWorkloadFactory& workloadFactory,
9459 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009460{
9461 const unsigned int inputShape[] = {4, 2, 2, 1};
9462 const unsigned int outputShape[] = {1, 4, 4, 1};
9463
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009464 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
9465 std::vector<uint8_t> expectedOutput({1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16});
Éanna Ó Catháin262553e2018-11-14 11:26:23 +00009466
9467 std::vector<unsigned int> blockShape({2, 2});
9468 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9469
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00009470 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager, armnn::DataLayout::NHWC, inputShape,
9471 input, blockShape, crops, outputShape, expectedOutput);
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00009472}
Nattapat Chaimanowong1216b582018-11-23 15:33:41 +00009473
Nattapat Chaimanowong3ee14222019-02-27 10:28:09 +00009474LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
9475 armnn::IWorkloadFactory& workloadFactory,
9476 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9477{
9478 const unsigned int inputShape[] = {4, 1, 1, 1};
9479 const unsigned int outputShape[] = {1, 2, 2, 1};
9480
9481 std::vector<uint8_t> input({
9482 // Batch 0, Height 0, Width (2) x Channel (1)
9483 1, 2, 3, 4
9484 });
9485
9486 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9487
9488 std::vector<unsigned int> blockShape({2, 2});
9489 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9490
9491 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9492 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9493 crops, outputShape, expectedOutput);
9494}
9495
9496LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest3(
9497 armnn::IWorkloadFactory& workloadFactory,
9498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9499{
9500 const unsigned int inputShape[] = {4, 1, 1, 3};
9501 const unsigned int outputShape[] = {1, 2, 2, 3};
9502
9503 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9504
9505 std::vector<uint8_t> expectedOutput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9506
9507 std::vector<unsigned int> blockShape({2, 2});
9508 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9509
9510 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9511 armnn::DataLayout::NHWC, inputShape, input, blockShape,
9512 crops, outputShape, expectedOutput);
9513}
9514
9515
9516LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest1(
9517 armnn::IWorkloadFactory &workloadFactory,
9518 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9519{
9520 const unsigned int inputShape[] = {4, 3, 1, 1};
9521 const unsigned int outputShape[] = {1, 3, 2, 2};
9522
9523 std::vector<uint8_t> input({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
9524
9525 std::vector<uint8_t> expectedOutput({
9526 // Batch 0, Channel 0, Height (2) x Width (2)
9527 1, 4,
9528 7, 10,
9529
9530 // Batch 0, Channel 1, Height (2) x Width (2)
9531 2, 5,
9532 8, 11,
9533
9534 // Batch 0, Channel 2, Height (2) x Width (2)
9535 3, 6,
9536 9, 12,
9537 });
9538
9539 std::vector<unsigned int> blockShape({2, 2});
9540 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9541
9542 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9543 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9544 crops, outputShape, expectedOutput);
9545}
9546
9547LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest2(
9548 armnn::IWorkloadFactory& workloadFactory,
9549 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9550{
9551 const unsigned int inputShape[] = {4, 1, 1, 1};
9552 const unsigned int outputShape[] = {1, 1, 2, 2};
9553
9554 std::vector<uint8_t> input({
9555 // Batch 0, Height 0, Width (2) x Channel (1)
9556 1, 2, 3, 4
9557 });
9558
9559 std::vector<uint8_t> expectedOutput({1, 2, 3, 4});
9560
9561 std::vector<unsigned int> blockShape({2, 2});
9562 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9563
9564 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9565 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9566 crops, outputShape, expectedOutput);
9567}
9568
9569LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest3(
9570 armnn::IWorkloadFactory& workloadFactory,
9571 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9572{
9573 const unsigned int inputShape[] = {4, 3, 1, 1};
9574 const unsigned int outputShape[] = {1, 3, 2, 2};
9575
9576 std::vector<uint8_t> input({1, 3, 5, 7, 9, 11, 2, 4, 6, 8, 10, 12});
9577
9578 std::vector<uint8_t> expectedOutput({
9579 // Batch 0, Channel 0, Height (2) x Width (2)
9580 1, 7,
9581 2, 8,
9582
9583 // Batch 0, Channel 1, Height (2) x Width (2)
9584 3, 9,
9585 4, 10,
9586
9587 // Batch 0, Channel 2, Height (2) x Width (2)
9588 5, 11,
9589 6, 12,
9590 });
9591
9592 std::vector<unsigned int> blockShape({2, 2});
9593 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
9594
9595 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9596 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9597 crops, outputShape, expectedOutput);
9598}
9599
9600LayerTestResult<uint8_t, 4> BatchToSpaceNdNchwUintTest4(
9601 armnn::IWorkloadFactory& workloadFactory,
9602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
9603{
9604 const unsigned int inputShape[] = {8, 1, 1, 3};
9605 const unsigned int outputShape[] = {2, 1, 2, 4};
9606
9607 std::vector<uint8_t> input({
9608 0, 1, 3, 0, 9, 11,
9609 0, 2, 4, 0, 10, 12,
9610 0, 5, 7, 0, 13, 15,
9611 0, 6, 8, 0, 14, 16
9612 });
9613
9614 std::vector<uint8_t> expectedOutput({
9615 1, 2, 3, 4,
9616 5, 6, 7, 8,
9617 9, 10, 11, 12,
9618 13, 14, 15, 16
9619 });
9620
9621 std::vector<unsigned int> blockShape({2, 2});
9622 std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {2, 0}};
9623
9624 return BatchToSpaceNdHelper<uint8_t, 4, 4>(workloadFactory, memoryManager,
9625 armnn::DataLayout::NCHW, inputShape, input, blockShape,
9626 crops, outputShape, expectedOutput);
9627}
9628
// Forwards to the templated StridedSlice4DTest, instantiated for Float32 data.
LayerTestResult<float, 4> StridedSlice4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9635
// Forwards to the templated StridedSlice4DReverseTest, instantiated for Float32 data.
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9642
// Forwards to the templated StridedSliceSimpleStrideTest, instantiated for Float32 data.
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9649
// Forwards to the templated StridedSliceSimpleRangeMaskTest, instantiated for Float32 data.
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9656
// Forwards to the templated StridedSliceShrinkAxisMaskTest, instantiated for Float32 data.
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9663
// Forwards to the templated StridedSlice3DTest, instantiated for Float32 data.
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9670
// Forwards to the templated StridedSlice3DReverseTest, instantiated for Float32 data.
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9677
// Forwards to the templated StridedSlice2DTest, instantiated for Float32 data.
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9684
// Forwards to the templated StridedSlice2DReverseTest, instantiated for Float32 data.
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9691
// Forwards to the templated StridedSlice4DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9698
// Forwards to the templated StridedSlice4DReverseTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9705
// Forwards to the templated StridedSliceSimpleStrideTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9712
// Forwards to the templated StridedSliceSimpleRangeMaskTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9719
// Forwards to the templated StridedSliceShrinkAxisMaskTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9726
// Forwards to the templated StridedSlice3DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9733
// Forwards to the templated StridedSlice3DReverseTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9740
// Forwards to the templated StridedSlice2DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9747
// Forwards to the templated StridedSlice2DReverseTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowongcfdcadf2018-12-06 11:54:33 +00009754
// Forwards to the templated Debug4DTest, instantiated for Float32 data.
LayerTestResult<float, 4> Debug4DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9761
// Forwards to the templated Debug3DTest, instantiated for Float32 data.
LayerTestResult<float, 3> Debug3DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9768
// Forwards to the templated Debug2DTest, instantiated for Float32 data.
LayerTestResult<float, 2> Debug2DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9775
// Forwards to the templated Debug1DTest, instantiated for Float32 data.
LayerTestResult<float, 1> Debug1DFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9782
// Forwards to the templated Debug4DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9789
// Forwards to the templated Debug3DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9796
// Forwards to the templated Debug2DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9803
// Forwards to the templated Debug1DTest, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Matteo Martincigh49124022019-01-11 13:25:59 +00009810
// Forwards to the templated Gather1DParamsTestImpl, instantiated for Float32 data.
LayerTestResult<float, 1> Gather1DParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9817
// Forwards to the templated Gather1DParamsTestImpl, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 1> Gather1DParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Gather1DParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9824
// Forwards to the templated GatherMultiDimParamsTestImpl, instantiated for Float32 data.
LayerTestResult<float, 2> GatherMultiDimParamsFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9831
// Forwards to the templated GatherMultiDimParamsTestImpl, instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9838
// Forwards to the templated GatherMultiDimParamsMultiDimIndicesTestImpl, instantiated for Float32 data.
LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloatTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
9845
// Forwards to the templated GatherMultiDimParamsMultiDimIndicesTestImpl,
// instantiated for QuantisedAsymm8 data.
LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return GatherMultiDimParamsMultiDimIndicesTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager);
}
Nattapat Chaimanowong8a54ac02019-03-29 15:25:04 +00009853
// Forwards to the templated DequantizeSimpleTest with QuantisedAsymm8 input
// (the dequantized result is Float32, per the return type).
LayerTestResult<float, 4> DequantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
Nattapat Chaimanowonga0beb3b2019-04-01 17:04:53 +01009860
// Forwards to the templated DequantizeOffsetTest with QuantisedAsymm8 input
// (the dequantized result is Float32, per the return type).
LayerTestResult<float, 4> DequantizeOffsetUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9867
// Forwards to the templated DequantizeSimpleTest with QuantisedSymm16 input
// (the dequantized result is Float32, per the return type).
LayerTestResult<float, 4> DequantizeSimpleInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}
9874
// Forwards to the templated QuantizeSimpleTest, instantiated for QuantisedAsymm8 output.
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9881
// Forwards to the templated QuantizeClampTest, instantiated for QuantisedAsymm8 output.
LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
9888
// Forwards to the templated QuantizeClampTest, instantiated for QuantisedSymm16 output.
LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
}