//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConcatTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

using namespace armnn;
using namespace armnnUtils;

//
// Helper functions and templates
//

OriginsDescriptor CreateDescriptorForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    std::vector<TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
}
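
// Illustrative note (example shapes are an assumption, not taken from a test): concatenating
// two { 2, 3 } inputs along dimension 0 should produce a descriptor whose two view origins are
// { 0, 0 } and { 2, 0 }, i.e. each view is offset along the concat axis by the sizes of the
// views placed before it.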

//
// Concat is only supported for the N and C dimensions of NCHW tensors and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one or the innermost dimension.
//

bool NeedPermuteForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
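
// A minimal reading of the rule above (illustrative, not exhaustive): 1d and 2d inputs
// always need the permute, while 3d inputs need it only when concatDim == 1, because
// concatDim == 0 (slowest iterating) and concatDim == 2 (innermost) can be concatenated
// directly.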

TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return TensorShape(3u, &newDims[0]);
}
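
// Example (illustrative shapes): a 1d shape { 5 } expands to { 1, 1, 5 } and a 2d shape
// { 2, 3 } expands to { 1, 2, 3 }; shapes with 3 or more dimensions are returned unchanged.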

void Generate3dPermuteVectorForConcat(
    unsigned int numDimensions,
    unsigned int & concatDim,
    std::pair<PermutationVector, PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({1, 2, 0});
        PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({2, 0, 1});
        PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
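
// Example (illustrative): for 2d inputs concatenated along dimension 1 the padded 3d concat
// axis is 2, so the forward vector {1, 2, 0} maps that axis to dimension 0, the concat is
// then performed along dimension 0, and the reverse vector {2, 0, 1} restores the original
// layout afterwards.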

template<typename T> void PermuteTensorData(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const PermutationVector& mappings,
    TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    IgnoreUnused(memoryManager);
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}

//
// Permutes the input tensors so that a supported concatenation can be performed.
// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy dimensions
// of size 1 at the front. Finally, this function reports what the output shape of the
// permuted, concatenated tensor is going to be.
//
template<typename T> void PermuteInputsForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    PermutationVector & permuteVector,
    unsigned int & concatDim,
    TensorInfo & outputTensorInfo)
{
    IgnoreUnused(memoryManager);
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const PermutationVector identity({0, 1, 2});

    std::pair<PermutationVector, PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
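
// Worked example (illustrative values, assumed rather than taken from a test): two { 4, 2 }
// inputs concatenated along dimension 1 are expanded to { 1, 4, 2 }, the forward vector
// {1, 2, 0} permutes them to { 2, 1, 4 }, the concat along dimension 0 produces { 4, 1, 4 },
// and the stored reverse vector later restores the { 1, 4, 4 } (i.e. { 4, 4 }) layout the
// caller expects.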

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the output of the
// concatenation back so we can check it against an expected output.
//
template <typename T> void PermuteOutputForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo & tensorInfo,
    const PermutationVector & permuteVector,
    std::unique_ptr<ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template<typename T> void Concatenate(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Save a copy of the parameters which we might need to change.
    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    TensorInfo outputTensorInfo = outputTensorInfoOrig;

    PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    ConcatQueueDescriptor queueDescriptor;
    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }

        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
            std::unique_ptr<ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);

            inputHandles.emplace_back(std::move(inputHandle));
        }
    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

//
// Implementation templates
//

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));

    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            7.0f, 8.0f, 9.0f,

            // Batch 1
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,

            // Batch 2
            7.0f, 8.0f, 9.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            9.0f,

            // Batch 1
            18.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 1, Channel 0
            25.0f, 26.0f,

            // Batch 1, Channel 1
            27.0f, 28.0f,

            // Batch 1, Channel 2
            29.0f, 30.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            13.0f, 14.0f,

            // Batch 0, Channel 7
            15.0f, 16.0f,

            // Batch 0, Channel 8
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 4
            27.0f, 28.0f,

            // Batch 1, Channel 5
            29.0f, 30.0f,

            // Batch 1, Channel 6
            31.0f, 32.0f,

            // Batch 1, Channel 7
            33.0f, 34.0f,

            // Batch 1, Channel 8
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            25.0f, 26.0f,

            // Batch 0, Channel 1
            27.0f, 28.0f,

            // Batch 0, Channel 2
            29.0f, 30.0f,

            // Batch 1, Channel 0
            13.0f, 14.0f,

            // Batch 1, Channel 1
            15.0f, 16.0f,

            // Batch 1, Channel 2
            17.0f, 18.0f,

            // Batch 2, Channel 0
            31.0f, 32.0f,

            // Batch 2, Channel 1
            33.0f, 34.0f,

            // Batch 2, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 0, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 0
            27.0f, 28.0f,

            // Batch 1, Channel 1
            29.0f, 30.0f,

            // Batch 1, Channel 2
            13.0f, 14.0f,

            // Batch 1, Channel 3
            15.0f, 16.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            25.0f, 26.0f,

            // Batch 0, Channel 7
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            27.0f, 28.0f,

            // Batch 1, Channel 4
            29.0f, 30.0f,

            // Batch 1, Channel 5
            13.0f, 14.0f,

            // Batch 1, Channel 6
            15.0f, 16.0f,

            // Batch 1, Channel 7
            31.0f, 32.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f,

            // Batch 0, Channel 1
            9.0f,

            // Batch 0, Channel 2
            11.0f,

            // Batch 1, Channel 0
            25.0f,

            // Batch 1, Channel 1
            27.0f,

            // Batch 1, Channel 2
            29.0f
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f,
            3.0f, 4.0f,
            5.0f, 6.0f,
            7.0f, 8.0f,
            9.0f, 10.0f,
            11.0f, 12.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            11.0f, 12.0f,
            13.0f, 14.0f,
            15.0f, 16.0f,
            17.0f, 18.0f,
            19.0f, 20.0f,
            21.0f, 22.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            21.0f, 22.0f,
            23.0f, 24.0f,
            25.0f, 26.0f,
            27.0f, 28.0f,
            29.0f, 30.0f,
            31.0f, 32.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
1460
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001461template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001462LayerTestResult<T, 4> Concat4dDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001463 IWorkloadFactory& workloadFactory,
1464 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001465 float qScale,
1466 int32_t qOffset)
1467{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001468 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001469
1470 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1471 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1472
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001473 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1474 {
1475 1.0f, 2.0f,
1476 3.0f, 4.0f,
1477 5.0f, 6.0f,
1478 7.0f, 8.0f,
1479 9.0f, 10.0f,
1480 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001481
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001482 11.0f, 12.0f,
1483 13.0f, 14.0f,
1484 15.0f, 16.0f,
1485 17.0f, 18.0f,
1486 19.0f, 20.0f,
1487 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001488
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001489 21.0f, 22.0f,
1490 23.0f, 24.0f,
1491 25.0f, 26.0f,
1492 27.0f, 28.0f,
1493 29.0f, 30.0f,
1494 31.0f, 32.0f
1495 },
1496 qScale, qOffset));
1497
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001498 return result;
1499}
1500
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001501template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001502LayerTestResult<T, 4> Concat4dDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001503 IWorkloadFactory& workloadFactory,
1504 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001505 float qScale,
1506 int32_t qOffset)
1507{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001508 TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001509
1510 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1511 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1512
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001513 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1514 {
1515 1.0f, 2.0f,
1516 3.0f, 4.0f,
1517 5.0f, 6.0f,
1518 7.0f, 8.0f,
1519 9.0f, 10.0f,
1520 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001521
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001522 11.0f, 12.0f,
1523 13.0f, 14.0f,
1524 15.0f, 16.0f,
1525 17.0f, 18.0f,
1526 19.0f, 20.0f,
1527 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001528
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001529 21.0f, 22.0f,
1530 23.0f, 24.0f,
1531 25.0f, 26.0f,
1532 27.0f, 28.0f,
1533 29.0f, 30.0f,
1534 31.0f, 32.0f
1535 },
1536 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001537
1538 return result;
1539}
1540
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001541template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001542LayerTestResult<T, 4> Concat4dDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001543 IWorkloadFactory& workloadFactory,
1544 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001545 float qScale,
1546 int32_t qOffset)
1547{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001548 TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001549
1550 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1551 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1552
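    // Concatenating along the height dimension stacks, for each channel, input0's two rows,
    // then input1's, then input2's, which produces the block-interleaved layout written out below.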
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001553 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1554 {
1555 1.0f, 2.0f,
1556 3.0f, 4.0f,
1557 11.0f, 12.0f,
1558 13.0f, 14.0f,
1559 21.0f, 22.0f,
1560 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001561
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001562 5.0f, 6.0f,
1563 7.0f, 8.0f,
1564 15.0f, 16.0f,
1565 17.0f, 18.0f,
1566 25.0f, 26.0f,
1567 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001568
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001569 9.0f, 10.0f,
1570 11.0f, 12.0f,
1571 19.0f, 20.0f,
1572 21.0f, 22.0f,
1573 29.0f, 30.0f,
1574 31.0f, 32.0f
1575 },
1576 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001577
1578 return result;
1579}
1580
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001581template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001582LayerTestResult<T, 4> Concat4dDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001583 IWorkloadFactory& workloadFactory,
1584 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001585 float qScale,
1586 int32_t qOffset,
1587 bool useSubtensor)
1588{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001589 TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001590
1591 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1592 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1593
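    // Concatenating along the width dimension joins, for each row, input0's two values,
    // then input1's, then input2's, giving the six-wide rows written out below.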
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001594 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1595 {
1596 1.0f, 2.0f,
1597 11.0f, 12.0f,
1598 21.0f, 22.0f,
1599 3.0f, 4.0f,
1600 13.0f, 14.0f,
1601 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001602
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001603 5.0f, 6.0f,
1604 15.0f, 16.0f,
1605 25.0f, 26.0f,
1606 7.0f, 8.0f,
1607 17.0f, 18.0f,
1608 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001609
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001610 9.0f, 10.0f,
1611 19.0f, 20.0f,
1612 29.0f, 30.0f,
1613 11.0f, 12.0f,
1614 21.0f, 22.0f,
1615 31.0f, 32.0f
1616 },
1617 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001618
1619 return result;
1620}
1621
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001622template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001623LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001624 IWorkloadFactory& workloadFactory,
1625 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001626 float qScale,
1627 int32_t qOffset)
1628{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001629 constexpr unsigned int dimension = 0u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001630
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001631 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1632 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1633 {
1634 1.0f, 2.0f,
1635 3.0f, 4.0f,
1636 5.0f, 6.0f,
1637 7.0f, 8.0f,
1638 9.0f, 10.0f,
1639 11.0f, 12.0f
1640 },
1641 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001642
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001643 TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001644
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001645 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1646 {
1647 11.0f, 12.0f,
1648 13.0f, 14.0f,
1649 15.0f, 16.0f,
1650 17.0f, 18.0f,
1651 19.0f, 20.0f,
1652 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001653
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001654 21.0f, 22.0f,
1655 23.0f, 24.0f,
1656 25.0f, 26.0f,
1657 27.0f, 28.0f,
1658 29.0f, 30.0f,
1659 31.0f, 32.0f
1660 },
1661 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001662
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001663 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001664
1665 LayerTestResult<T, 4> result(outputTensorInfo);
1666
1667 std::vector<T> output;
1668 output.resize(outputTensorInfo.GetNumElements());
1669 Concatenate<T>(workloadFactory,
1670 memoryManager,
1671 {inputTensorInfo0, inputTensorInfo1},
1672 {input0.data(), input1.data()},
1673 outputTensorInfo,
1674 output.data(),
1675 dimension,
1676 true);
1677
1678 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001679 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1680 {
1681 1.0f, 2.0f,
1682 3.0f, 4.0f,
1683 5.0f, 6.0f,
1684 7.0f, 8.0f,
1685 9.0f, 10.0f,
1686 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001687
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001688 11.0f, 12.0f,
1689 13.0f, 14.0f,
1690 15.0f, 16.0f,
1691 17.0f, 18.0f,
1692 19.0f, 20.0f,
1693 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001694
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001695 21.0f, 22.0f,
1696 23.0f, 24.0f,
1697 25.0f, 26.0f,
1698 27.0f, 28.0f,
1699 29.0f, 30.0f,
1700 31.0f, 32.0f
1701 },
1702 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001703
1704 return result;
1705}
1706
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001707template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001708LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001709 IWorkloadFactory& workloadFactory,
1710 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001711 float qScale,
1712 int32_t qOffset)
1713{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001714 constexpr unsigned int dimension = 1u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001715
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001716 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1717 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1718 {
1719 1.0f, 2.0f,
1720 3.0f, 4.0f,
1721 5.0f, 6.0f,
1722 7.0f, 8.0f,
1723 9.0f, 10.0f,
1724 11.0f, 12.0f
1725 },
1726 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001727
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001728 TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001729
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001730 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1731 {
1732 11.0f, 12.0f,
1733 13.0f, 14.0f,
1734 15.0f, 16.0f,
1735 17.0f, 18.0f,
1736 },
1737 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001738
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001739 TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001740
1741 LayerTestResult<T, 4> result(outputTensorInfo);
1742
1743 std::vector<T> output;
1744 output.resize(outputTensorInfo.GetNumElements());
1745 Concatenate<T>(workloadFactory,
1746 memoryManager,
1747 {inputTensorInfo0, inputTensorInfo1},
1748 {input0.data(), input1.data()},
1749 outputTensorInfo,
1750 output.data(),
1751 dimension,
1752 true);
1753
1754 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001755 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1756 {
1757 1.0f, 2.0f,
1758 3.0f, 4.0f,
1759 5.0f, 6.0f,
1760 7.0f, 8.0f,
1761 9.0f, 10.0f,
1762 11.0f, 12.0f,
1763 11.0f, 12.0f,
1764 13.0f, 14.0f,
1765 15.0f, 16.0f,
1766 17.0f, 18.0f
1767 },
1768 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001769
1770 return result;
1771}
1772
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001773template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001774LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001775 IWorkloadFactory& workloadFactory,
1776 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001777 float qScale,
1778 int32_t qOffset)
1779{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001780 constexpr unsigned int dimension = 2u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001781
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001782 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1783 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1784 {
1785 1.0f, 2.0f,
1786 3.0f, 4.0f,
1787 5.0f, 6.0f,
1788 7.0f, 8.0f,
1789 9.0f, 10.0f,
1790 11.0f, 12.0f
1791 },
1792 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001793
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001794 TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1795 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1796 {
1797 11.0f, 12.0f,
1798 13.0f, 14.0f,
1799 15.0f, 16.0f,
1800 17.0f, 18.0f,
1801 19.0f, 20.0f,
1802 21.0f, 22.0f,
1803 23.0f, 24.0f,
1804 25.0f, 26.0f,
1805 27.0f, 28.0f
1806 },
1807 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001808
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001809 TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001810 LayerTestResult<T, 4> result(outputTensorInfo);
1811
1812 std::vector<T> output;
1813 output.resize(outputTensorInfo.GetNumElements());
1814 Concatenate<T>(workloadFactory,
1815 memoryManager,
1816 {inputTensorInfo0, inputTensorInfo1},
1817 {input0.data(), input1.data()},
1818 outputTensorInfo,
1819 output.data(),
1820 dimension,
1821 true);
1822
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001823 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1824 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1825 {
1826 1.0f, 2.0f,
1827 3.0f, 4.0f,
1828 11.0f, 12.0f,
1829 13.0f, 14.0f,
1830 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001831
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001832 5.0f, 6.0f,
1833 7.0f, 8.0f,
1834 17.0f, 18.0f,
1835 19.0f, 20.0f,
1836 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001837
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001838 9.0f, 10.0f,
1839 11.0f, 12.0f,
1840 23.0f, 24.0f,
1841 25.0f, 26.0f,
1842 27.0f, 28.0f
1843 },
1844 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001845
1846 return result;
1847}
1848
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001849template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001850LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001851 IWorkloadFactory& workloadFactory,
1852 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001853 float qScale,
1854 int32_t qOffset,
1855 bool useSubtensor)
1856{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001857 constexpr unsigned int dimension = 3u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001858
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001859 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1860 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1861 {
1862 1.0f, 2.0f,
1863 3.0f, 4.0f,
1864 5.0f, 6.0f,
1865 7.0f, 8.0f,
1866 9.0f, 10.0f,
1867 11.0f, 12.0f
1868 },
1869 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001870
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001871 TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1872 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1873 {
1874 11.0f, 12.0f, 13.0f,
1875 14.0f, 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001876
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001877 17.0f, 18.0f, 19.0f,
1878 20.0f, 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001879
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001880 23.0f, 24.0f, 25.0f,
1881 26.0f, 27.0f, 28.0f
1882 },
1883 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001884
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001885 TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001886
1887 LayerTestResult<T, 4> result(outputTensorInfo);
1888
1889 std::vector<T> output;
1890 output.resize(outputTensorInfo.GetNumElements());
1891 Concatenate<T>(workloadFactory,
1892 memoryManager,
1893 {inputTensorInfo0, inputTensorInfo1},
1894 {input0.data(), input1.data()},
1895 outputTensorInfo,
1896 output.data(),
1897 dimension,
1898 useSubtensor);
1899
1900 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001901 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1902 {
1903 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1904 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1905 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1906 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1907 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1908 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1909 },
1910 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001911
1912 return result;
1913}
1914
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001915template<DataType ArmnnType, typename T>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001916LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001917 IWorkloadFactory& workloadFactory,
1918 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001919 bool useSubtensor)
1920{
Jan Eilers8eb25602020-03-09 12:13:48 +00001921 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00001922
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001923 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001924 TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1925 TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1926 TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001927
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001928 std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001929
1930 // Quantized input1 tensor.
1931 const float inputScale1 = 0.5f;
1932 const int32_t inputOffset1 = 5;
1933
1934 auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1935 {
1936 1, 2, 3,
1937 4, 5, 6,
1938 7, 8, 9,
1939 10, 11, 12,
1940 13, 14, 15,
1941 16, 17, 18,
1942
1943 19, 20, 21,
1944 22, 23, 24,
1945 25, 26, 27,
1946 28, 29, 30,
1947 31, 32, 33,
1948 34, 35, 36
1949 }));
1950
 1951 // Quantized input2 tensor.
1952 const float inputScale2 = 0.2f;
1953 const int32_t inputOffset2 = 10;
1954
1955 auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1956 {
1957 37, 38, 39,
1958 40, 41, 42,
1959 43, 44, 45,
1960 46, 47, 48,
1961 49, 50, 51,
1962 52, 53, 54
1963 }));
1964
1965 // Quantized output tensor.
1966 const float outputScale = 0.1f;
1967 const int32_t outputOffset = 20;
1968
1969 LayerTestResult<T, 3> ret(outputTensorInfo);
1970
1971 ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1972 {
1973 0, 5, 74,
1974 10, 15, 76,
1975 20, 25, 78,
1976 30, 35, 80,
1977 40, 45, 82,
1978 50, 55, 84,
1979
1980 60, 65, 86,
1981 70, 75, 88,
1982 80, 85, 90,
1983 90, 95, 92,
1984 100, 105, 94,
1985 110, 115, 96,
1986
1987 120, 125, 98,
1988 130, 135, 100,
1989 140, 145, 102,
1990 150, 155, 104,
1991 160, 165, 106,
1992 170, 175, 108
1993 }));
1994
1995 outputTensorInfo.SetQuantizationScale(outputScale);
1996 outputTensorInfo.SetQuantizationOffset(outputOffset);
1997 inputTensorInfo1.SetQuantizationScale(inputScale1);
1998 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1999 inputTensorInfo2.SetQuantizationScale(inputScale2);
2000 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
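    // The expected values above follow from requantizing each input into the output's
    // quantization space. A minimal sketch of that mapping (an illustrative assumption,
    // not necessarily the exact rounding the backend uses):
    //
    //     auto requantize = [](int32_t qIn, float inScale, int32_t inOffset,
    //                          float outScale, int32_t outOffset)
    //     {
    //         float real = (qIn - inOffset) * inScale;
    //         return static_cast<int32_t>(std::round(real / outScale)) + outOffset;
    //     };
    //
    // For example, input1's first element 1 maps to (1 - 5) * 0.5f / 0.1f + 20 = 0, and
    // input2's first element 37 maps to (37 - 10) * 0.2f / 0.1f + 20 = 74.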
2001
2002 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002003 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002004
2005 std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002006 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
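    // The concatenation axis here is dimension 2, so input[1]'s view origin is offset by
    // input[0]'s extent along that axis (2), while every other coordinate stays at 0.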
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002007
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002008 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002009
2010 bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2011
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002012 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002013 subTensorsSupported ?
2014 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2015 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2016
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002017 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002018 subTensorsSupported ?
2019 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2020 workloadFactory.CreateTensorHandle(inputTensorInfo2);
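    // If the backend supports sub-tensors, each input handle is created as a view into the
    // output tensor at its ViewOrigin, so data copied into the inputs lands directly in the
    // output buffer; otherwise standalone input tensors are allocated and the Concat
    // workload copies each view into the output.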
2021
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002022 ConcatQueueDescriptor data;
2023 OriginsDescriptor desc = CreateDescriptorForConcatenation(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002024 inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2025 data.m_Parameters = desc;
2026
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002027 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002028 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2029 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2030 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2031
2032 data.m_ViewOrigins.push_back(window1);
2033 data.m_ViewOrigins.push_back(window2);
2034
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002035 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002036
2037 inputHandle1->Allocate();
2038 inputHandle2->Allocate();
2039 outputHandle->Allocate();
2040
2041 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2042 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2043
2044 workload->PostAllocationConfigure();
2045 workload->Execute();
2046
2047 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2048
2049 return ret;
2050}
2051
2052//
2053// Explicit template specializations
2054//
2055
Derek Lambertif90c56d2020-01-10 17:14:08 +00002056template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
2057ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002058 IWorkloadFactory& workloadFactory,
2059 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002060 bool useSubtensor);
2061
Derek Lambertif90c56d2020-01-10 17:14:08 +00002062template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
2063ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002064 IWorkloadFactory& workloadFactory,
2065 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002066 bool useSubtensor);
2067
2068//
2069// Implementation functions
2070//
2071
2072LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002073 IWorkloadFactory& workloadFactory,
2074 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002075{
Jan Eilers8eb25602020-03-09 12:13:48 +00002076 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002077
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002078 unsigned int outputWidth = 3;
2079 unsigned int outputHeight = 6;
2080 unsigned int outputChannels = 3;
2081
2082 unsigned int inputWidth1 = 3;
2083 unsigned int inputHeight1 = 6;
2084 unsigned int inputChannels1 = 2;
2085
2086 unsigned int inputWidth2 = 3;
2087 unsigned int inputHeight2 = 6;
2088 unsigned int inputChannels2 = 1;
2089
2090 // Define the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002091 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2092 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2093 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002094
2095 LayerTestResult<float,3> ret(outputTensorInfo);
2096
2097 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2098 {
2099 1.0f, 2.0f, 3.0f,
2100 4.0f, 5.0f, 6.0f,
2101 7.0f, 8.0f, 9.0f,
2102 10.0f, 11.0f, 12.0f,
2103 13.0f, 14.0f, 15.0f,
2104 16.0f, 17.0f, 18.0f,
2105
2106 19.0f, 20.0f, 21.0f,
2107 22.0f, 23.0f, 24.0f,
2108 25.0f, 26.0f, 27.0f,
2109 28.0f, 29.0f, 30.0f,
2110 31.0f, 32.0f, 33.0f,
2111 34.0f, 35.0f, 36.0f,
2112
2113 37.0f, 38.0f, 39.0f,
2114 40.0f, 41.0f, 42.0f,
2115 43.0f, 44.0f, 45.0f,
2116 46.0f, 47.0f, 48.0f,
2117 49.0f, 50.0f, 51.0f,
2118 52.0f, 53.0f, 54.0f,
2119 })
2120 );
2121
2122 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2123 {
2124 1.0f, 2.0f, 3.0f,
2125 4.0f, 5.0f, 6.0f,
2126 7.0f, 8.0f, 9.0f,
2127 10.0f, 11.0f, 12.0f,
2128 13.0f, 14.0f, 15.0f,
2129 16.0f, 17.0f, 18.0f,
2130
2131 19.0f, 20.0f, 21.0f,
2132 22.0f, 23.0f, 24.0f,
2133 25.0f, 26.0f, 27.0f,
2134 28.0f, 29.0f, 30.0f,
2135 31.0f, 32.0f, 33.0f,
2136 34.0f, 35.0f, 36.0f,
2137 })
2138 );
2139
2140 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2141 {
2142 37.0f, 38.0f, 39.0f,
2143 40.0f, 41.0f, 42.0f,
2144 43.0f, 44.0f, 45.0f,
2145 46.0f, 47.0f, 48.0f,
2146 49.0f, 50.0f, 51.0f,
2147 52.0f, 53.0f, 54.0f,
2148 })
2149 );
2150
2151 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002152 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002153
2154 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002155 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
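    // Concatenation here is along the channel dimension: input1 occupies channels 0-1 of
    // the three-channel output, so input2's view origin starts at channel offset 2.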
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002156
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002157 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002158
2159 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2160
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002161 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002162 subTensorsSupported ?
2163 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2164 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2165
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002166 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002167 subTensorsSupported ?
2168 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2169 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2170
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002171 ConcatQueueDescriptor data;
2172 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002173 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2174 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2175 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2176
2177 data.m_ViewOrigins.push_back(window1);
2178 data.m_ViewOrigins.push_back(window2);
2179
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002180 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002181
2182 inputHandle1->Allocate();
2183 inputHandle2->Allocate();
2184 outputHandle->Allocate();
2185
2186 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2187 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2188
2189 workload->PostAllocationConfigure();
2190 workload->Execute();
2191
2192 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2193
2194 return ret;
2195}
2196
2197LayerTestResult<float, 1> Concat1dTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002198 IWorkloadFactory& workloadFactory,
2199 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002200{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002201 return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002202}
2203
2204LayerTestResult<float, 2> Concat2dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002205 IWorkloadFactory& workloadFactory,
2206 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002207{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002208 return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002209}
2210
2211LayerTestResult<float, 2> Concat2dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002212 IWorkloadFactory& workloadFactory,
2213 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002214{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002215 return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002216}
2217
2218LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002219 IWorkloadFactory& workloadFactory,
2220 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002221{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002222 return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002223}
2224
2225LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002226 IWorkloadFactory& workloadFactory,
2227 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002228{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002229 return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002230}
2231
2232LayerTestResult<float, 3> Concat3dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002233 IWorkloadFactory& workloadFactory,
2234 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002235{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002236 return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002237}
2238
2239LayerTestResult<float, 3> Concat3dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002240 IWorkloadFactory& workloadFactory,
2241 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002242{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002243 return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002244}
2245
2246LayerTestResult<float, 3> Concat3dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002247 IWorkloadFactory& workloadFactory,
2248 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002249 bool useSubtensor)
2250{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002251 return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002252}
2253
2254LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002255 IWorkloadFactory& workloadFactory,
2256 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002257{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002258 return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002259 workloadFactory, memoryManager, 0.0f, 0);
2260}
2261
2262LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002263 IWorkloadFactory& workloadFactory,
2264 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002265{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002266 return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002267}
2268
2269LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002270 IWorkloadFactory& workloadFactory,
2271 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002272 bool useSubtensor)
2273{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002274 return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002275 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2276}
2277
2278LayerTestResult<float, 4> Concat4dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002279 IWorkloadFactory& workloadFactory,
2280 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002281{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002282 return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002283}
2284
2285LayerTestResult<float, 4> Concat4dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002286 IWorkloadFactory& workloadFactory,
2287 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002288{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002289 return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002290}
2291
2292LayerTestResult<float, 4> Concat4dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002293 IWorkloadFactory& workloadFactory,
2294 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002295{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002296 return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002297}
2298
2299LayerTestResult<float, 4> Concat4dDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002300 IWorkloadFactory& workloadFactory,
2301 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002302 bool useSubtensor)
2303{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002304 return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002305}
2306
2307LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002308 IWorkloadFactory& workloadFactory,
2309 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002310{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002311 return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002312}
2313
2314LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002315 IWorkloadFactory& workloadFactory,
2316 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002317{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002318 return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002319 workloadFactory, memoryManager, 0.0f, 0);
2320}
2321
2322LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002323 IWorkloadFactory& workloadFactory,
2324 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002325{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002326 return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002327}
2328
2329LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002330 IWorkloadFactory& workloadFactory,
2331 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002332 bool useSubtensor)
2333{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002334 return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002335 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2336}
2337
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002338LayerTestResult<Half, 3> ConcatFloat16Test(
2339 IWorkloadFactory& workloadFactory,
2340 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matthew Jackson9bff1442019-09-12 09:08:23 +01002341{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002342 return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
Matthew Jackson9bff1442019-09-12 09:08:23 +01002343}
2344
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002345LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002346 IWorkloadFactory& workloadFactory,
2347 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002348{
Jan Eilers8eb25602020-03-09 12:13:48 +00002349 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002350
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002351 unsigned int outputWidth = 3;
2352 unsigned int outputHeight = 6;
2353 unsigned int outputChannels = 3;
2354
2355 unsigned int inputWidth1 = 3;
2356 unsigned int inputHeight1 = 6;
2357 unsigned int inputChannels1 = 2;
2358
2359 unsigned int inputWidth2 = 3;
2360 unsigned int inputHeight2 = 6;
2361 unsigned int inputChannels2 = 1;
2362
2363 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002364 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2365 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2366 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002367
2368 // Quantized input1 tensor. Range [-3, 1]
2369 const float inputScale1 = 0.015686f;
2370 const int32_t inputOffset1 = 192;
2371
2372 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2373 {
2374 1, 2, 3,
2375 4, 5, 6,
2376 7, 8, 9,
2377 10, 11, 12,
2378 13, 14, 15,
2379 16, 17, 18,
2380
2381 19, 20, 21,
2382 22, 23, 24,
2383 25, 26, 27,
2384 28, 29, 30,
2385 31, 32, 33,
2386 34, 35, 36,
2387 })
2388 );
2389
 2390 // Quantized input2 tensor. Range [-1, 4]
2391 const float inputScale2 = 0.019608f;
2392 const int32_t inputOffset2 = 50;
2393
2394 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2395 {
2396 37, 38, 39,
2397 40, 41, 42,
2398 43, 44, 45,
2399 46, 47, 48,
2400 49, 50, 51,
2401 52, 53, 54,
2402 })
2403 );
2404
 2405 // Output has the same quantization parameters as input1,
 2406 // so that only the requantization of input2 is required.
2407 const float outputScale = 0.015686f;
2408 const int32_t outputOffset = 192;
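    // Only input2 needs requantizing into the output space, since input1 and the output
    // share their quantization parameters. For example, input2's first element 37 maps to
    // (37 - 50) * 0.019608f / 0.015686f + 192, roughly 175.75, which rounds to the 176
    // seen in the expected output below.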
2409
2410 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2411
2412 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2413 {
2414 1, 2, 3,
2415 4, 5, 6,
2416 7, 8, 9,
2417 10, 11, 12,
2418 13, 14, 15,
2419 16, 17, 18,
2420
2421 19, 20, 21,
2422 22, 23, 24,
2423 25, 26, 27,
2424 28, 29, 30,
2425 31, 32, 33,
2426 34, 35, 36,
2427
2428 176, 177, 178,
2429 179, 181, 182,
2430 183, 184, 186,
2431 187, 188, 189,
2432 191, 192, 193,
2433 195, 196, 197,
2434 })
2435 );
2436
2437 outputTensorInfo.SetQuantizationScale(outputScale);
2438 outputTensorInfo.SetQuantizationOffset(outputOffset);
2439 inputTensorInfo1.SetQuantizationScale(inputScale1);
2440 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2441 inputTensorInfo2.SetQuantizationScale(inputScale2);
2442 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2443
2444 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002445 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002446
2447 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002448 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002449
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002450 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002451
2452 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2453
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002454 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002455 subTensorsSupported ?
2456 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2457 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2458
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002459 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002460 subTensorsSupported ?
2461 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2462 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2463
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002464 ConcatQueueDescriptor data;
2465 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002466 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2467 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2468 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2469
2470 data.m_ViewOrigins.push_back(window1);
2471 data.m_ViewOrigins.push_back(window2);
2472
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002473 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002474
2475 inputHandle1->Allocate();
2476 inputHandle2->Allocate();
2477 outputHandle->Allocate();
2478
2479 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2480 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2481
2482 workload->PostAllocationConfigure();
2483 workload->Execute();
2484
2485 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2486
2487 return ret;
2488}
2489
2490LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002491 IWorkloadFactory& workloadFactory,
2492 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002493{
Jan Eilers8eb25602020-03-09 12:13:48 +00002494 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002495
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002496 unsigned int outputWidth = 3;
2497 unsigned int outputHeight = 6;
2498 unsigned int outputChannels = 3;
2499
2500 unsigned int inputWidth1 = 3;
2501 unsigned int inputHeight1 = 6;
2502 unsigned int inputChannels1 = 2;
2503
2504 unsigned int inputWidth2 = 3;
2505 unsigned int inputHeight2 = 6;
2506 unsigned int inputChannels2 = 1;
2507
2508 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002509 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2510 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2511 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002512
2513 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2514 const float scale = 0.13497836f;
2515 const int32_t offset = -7;
2516
2517 outputTensorInfo.SetQuantizationScale(scale);
2518 outputTensorInfo.SetQuantizationOffset(offset);
2519 inputTensorInfo1.SetQuantizationScale(scale);
2520 inputTensorInfo1.SetQuantizationOffset(offset);
2521 inputTensorInfo2.SetQuantizationScale(scale);
2522 inputTensorInfo2.SetQuantizationOffset(offset);
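    // Because all three tensor infos share the same scale and offset, the uint8 values pass
    // through the concat unchanged, so the expected output below simply restates the inputs.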
2523
2524 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2525
2526 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2527 {
2528 1, 2, 3,
2529 4, 5, 6,
2530 7, 8, 9,
2531 10, 11, 12,
2532 13, 14, 15,
2533 16, 17, 18,
2534
2535 19, 20, 21,
2536 22, 23, 24,
2537 25, 26, 27,
2538 28, 29, 30,
2539 31, 32, 33,
2540 34, 35, 36,
2541
2542 37, 38, 39,
2543 40, 41, 42,
2544 43, 44, 45,
2545 46, 47, 48,
2546 49, 50, 51,
2547 52, 53, 54,
2548 })
2549 );
2550
2551 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2552 {
2553 1, 2, 3,
2554 4, 5, 6,
2555 7, 8, 9,
2556 10, 11, 12,
2557 13, 14, 15,
2558 16, 17, 18,
2559
2560 19, 20, 21,
2561 22, 23, 24,
2562 25, 26, 27,
2563 28, 29, 30,
2564 31, 32, 33,
2565 34, 35, 36,
2566 })
2567 );
2568
2569 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2570 {
2571 37, 38, 39,
2572 40, 41, 42,
2573 43, 44, 45,
2574 46, 47, 48,
2575 49, 50, 51,
2576 52, 53, 54,
2577 })
2578 );
2579
2580 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002581 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002582
2583 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002584 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002585
2586
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002587 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002588
2589 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2590
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002591 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002592 subTensorsSupported ?
2593 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2594 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2595
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002596 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002597 subTensorsSupported ?
2598 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2599 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2600
2601
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002602 ConcatQueueDescriptor data;
2603 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002604 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2605 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2606 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2607
2608 data.m_ViewOrigins.push_back(window1);
2609 data.m_ViewOrigins.push_back(window2);
2610
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002611 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002612
2613 inputHandle1->Allocate();
2614 inputHandle2->Allocate();
2615 outputHandle->Allocate();
2616
2617 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2618 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2619
2620 workload->PostAllocationConfigure();
2621 workload->Execute();
2622
2623 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2624
2625 return ret;
2626}
2627
2628LayerTestResult<uint16_t, 3> ConcatUint16Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002629 IWorkloadFactory& workloadFactory,
2630 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002631{
Jan Eilers8eb25602020-03-09 12:13:48 +00002632 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002633
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002634 unsigned int outputWidth = 3;
2635 unsigned int outputHeight = 6;
2636 unsigned int outputChannels = 3;
2637
2638 unsigned int inputWidth1 = 3;
2639 unsigned int inputHeight1 = 6;
2640 unsigned int inputChannels1 = 2;
2641
2642 unsigned int inputWidth2 = 3;
2643 unsigned int inputHeight2 = 6;
2644 unsigned int inputChannels2 = 1;
2645
2646 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002647 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2648 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2649 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002650
2651 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2652 const float scale = 0.13497836f;
2653 const int32_t offset = -7;
2654
2655 outputTensorInfo.SetQuantizationScale(scale);
2656 outputTensorInfo.SetQuantizationOffset(offset);
2657 inputTensorInfo1.SetQuantizationScale(scale);
2658 inputTensorInfo1.SetQuantizationOffset(offset);
2659 inputTensorInfo2.SetQuantizationScale(scale);
2660 inputTensorInfo2.SetQuantizationOffset(offset);
2661
2662 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2663
2664 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2665 {
2666 1, 2, 3,
2667 4, 5, 6,
2668 7, 8, 9,
2669 10, 11, 12,
2670 13, 14, 15,
2671 16, 17, 18,
2672
2673 19, 20, 21,
2674 22, 23, 24,
2675 25, 26, 27,
2676 28, 29, 30,
2677 31, 32, 33,
2678 34, 35, 36,
2679
2680 37, 38, 39,
2681 40, 41, 42,
2682 43, 44, 45,
2683 46, 47, 48,
2684 49, 50, 51,
2685 52, 53, 54,
2686 }));
2687
2688 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2689 {
2690 1, 2, 3,
2691 4, 5, 6,
2692 7, 8, 9,
2693 10, 11, 12,
2694 13, 14, 15,
2695 16, 17, 18,
2696
2697 19, 20, 21,
2698 22, 23, 24,
2699 25, 26, 27,
2700 28, 29, 30,
2701 31, 32, 33,
2702 34, 35, 36,
2703 }));
2704
2705 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2706 {
2707 37, 38, 39,
2708 40, 41, 42,
2709 43, 44, 45,
2710 46, 47, 48,
2711 49, 50, 51,
2712 52, 53, 54,
2713 }));
2714
2715 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002716 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002717
2718 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002719 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002720
2721
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002722 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002723
2724 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2725
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002726 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002727 subTensorsSupported ?
2728 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2729 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2730
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002731 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002732 subTensorsSupported ?
2733 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2734 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2735
2736
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002737 ConcatQueueDescriptor data;
2738 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002739 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2740 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2741 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2742
2743 data.m_ViewOrigins.push_back(window1);
2744 data.m_ViewOrigins.push_back(window2);
2745
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002746 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002747
2748 inputHandle1->Allocate();
2749 inputHandle2->Allocate();
2750 outputHandle->Allocate();
2751
2752 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2753 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2754
2755 workload->PostAllocationConfigure();
2756 workload->Execute();
2757
2758 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2759
2760 return ret;
2761}
2762
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDim3TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
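
//
// Usage note: these helpers are typically registered from a backend's unit test file. A minimal sketch,
// assuming the ARMNN_AUTO_TEST_CASE macro from the backendsCommon test utilities (the exact macro name and
// header location may vary between ArmNN versions):
//
//     ARMNN_AUTO_TEST_CASE(Concat1dUint8, Concat1dUint8Test)
//     ARMNN_AUTO_TEST_CASE(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
//     ARMNN_AUTO_TEST_CASE(Concat3dDim2Uint8, Concat3dDim2Uint8Test, true) // true = useSubtensor
//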