//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConcatTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

using namespace armnn;
using namespace armnnUtils;

//
// Helper functions and templates
//

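// Builds the OriginsDescriptor that records where each input view is placed in the concatenated output.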
OriginsDescriptor CreateDescriptorForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    std::vector<TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
}

//
// Concat is only supported along the N and C dimensions for NCHW, and along the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concat dimension is either
// the 3rd slowest iterating dimension or the innermost one.
//
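// For example, concatenating 3d tensors along dimension 1 needs a permute, while dimensions 0 and 2
// do not; tensors with fewer than 3 dimensions are always permuted once they have been expanded to
// 3d (see NeedPermuteForConcat below).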

bool NeedPermuteForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                             "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}

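// Pads a shape with leading 1s until it is 3d: for example { 2, 3 } becomes { 1, 2, 3 } and
// { 5 } becomes { 1, 1, 5 }; shapes that already have 3 or more dimensions are returned unchanged.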
TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return TensorShape(3u, &newDims[0]);
}

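// Works out the forward permutation that moves the (expanded) concat axis to dimension 0, together
// with the reverse permutation used later to restore the original layout. For example, a 2d concat
// along dimension 1 maps to expanded axis 2 and uses forward { 1, 2, 0 } / reverse { 2, 0, 1 }.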
void Generate3dPermuteVectorForConcat(
    unsigned int numDimensions,
    unsigned int & concatDim,
    std::pair<PermutationVector, PermutationVector> & permutations)
{
    ARMNN_ASSERT_MSG(numDimensions <= 3,
                     "Only dimensions 1,2 and 3 are supported by this helper");
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({1, 2, 0});
        PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({2, 0, 1});
        PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        ARMNN_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}

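// Runs a Permute workload over inputData using the given mappings, writes the permuted values to
// outputData and updates inputTensorInfo in place so that it describes the permuted tensor.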
template<typename T> void PermuteTensorData(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const PermutationVector& mappings,
    TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    IgnoreUnused(memoryManager);
    ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}

//
// Permutes the input tensors so we can do a supported concatenation.
// Tensors with fewer than 3 dimensions are also treated as 3d by adding
// dummy 1 dimensions at the front. Finally this function reports what the
// output shape of the permuted, concatenated tensor is going to be.
//
template<typename T> void PermuteInputsForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    PermutationVector & permuteVector,
    unsigned int & concatDim,
    TensorInfo & outputTensorInfo)
{
    IgnoreUnused(memoryManager);
    ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
                     "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const PermutationVector identity({0, 1, 2});

    std::pair<PermutationVector, PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
                             "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                             "All inputs must have the same number of dimensions");
        }

        TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes back
// the output of the concatenation so we can check it against an expected
// output.
//
template <typename T> void PermuteOutputForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo & tensorInfo,
    const PermutationVector & permuteVector,
    std::unique_ptr<ITensorHandle> && inputDataHandle,
    T * data)
{
    ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

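// Concatenates the given inputs along concatDim and copies the result into output. If the requested
// axis is not directly supported, the inputs are permuted first (see PermuteInputsForConcat) and the
// concatenated result is permuted back before being returned.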
template<typename T> void Concatenate(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // we expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Saves a copy of the parameters which we might need to change.
    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    TensorInfo outputTensorInfo = outputTensorInfoOrig;

    PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END
    ConcatQueueDescriptor queueDescriptor;
    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
        ARMNN_NO_DEPRECATE_WARN_END
        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            std::unique_ptr<ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);
            ARMNN_NO_DEPRECATE_WARN_END
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            ARMNN_NO_DEPRECATE_WARN_END
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

//
// Implementation templates
//

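// Each of the tests below passes qScale and qOffset through QuantizedVector when building its input
// and expected data, so the same test body serves both float and quantized data types; outputExpected
// is filled in for the caller to compare against.
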
template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));

    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            7.0f, 8.0f, 9.0f,

            // Batch 1
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,

            // Batch 2
            7.0f, 8.0f, 9.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            9.0f,

            // Batch 1
            18.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 1, Channel 0
            25.0f, 26.0f,

            // Batch 1, Channel 1
            27.0f, 28.0f,

            // Batch 1, Channel 2
            29.0f, 30.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            13.0f, 14.0f,

            // Batch 0, Channel 7
            15.0f, 16.0f,

            // Batch 0, Channel 8
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 4
            27.0f, 28.0f,

            // Batch 1, Channel 5
            29.0f, 30.0f,

            // Batch 1, Channel 6
            31.0f, 32.0f,

            // Batch 1, Channel 7
            33.0f, 34.0f,

            // Batch 1, Channel 8
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            25.0f, 26.0f,

            // Batch 0, Channel 1
            27.0f, 28.0f,

            // Batch 0, Channel 2
            29.0f, 30.0f,

            // Batch 1, Channel 0
            13.0f, 14.0f,

            // Batch 1, Channel 1
            15.0f, 16.0f,

            // Batch 1, Channel 2
            17.0f, 18.0f,

            // Batch 2, Channel 0
            31.0f, 32.0f,

            // Batch 2, Channel 1
            33.0f, 34.0f,

            // Batch 2, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 0, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 0
            27.0f, 28.0f,

            // Batch 1, Channel 1
            29.0f, 30.0f,

            // Batch 1, Channel 2
            13.0f, 14.0f,

            // Batch 1, Channel 3
            15.0f, 16.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            25.0f, 26.0f,

            // Batch 0, Channel 7
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            27.0f, 28.0f,

            // Batch 1, Channel 4
            29.0f, 30.0f,

            // Batch 1, Channel 5
            13.0f, 14.0f,

            // Batch 1, Channel 6
            15.0f, 16.0f,

            // Batch 1, Channel 7
            31.0f, 32.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f,

            // Batch 0, Channel 1
            9.0f,

            // Batch 0, Channel 2
            11.0f,

            // Batch 1, Channel 0
            25.0f,

            // Batch 1, Channel 1
            27.0f,

            // Batch 1, Channel 2
            29.0f
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    return result;
}

Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001403template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001404LayerTestResult<T, 4> Concat4dTestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001405 IWorkloadFactory& workloadFactory,
1406 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1407 const TensorInfo& outputTensorInfo,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001408 unsigned int dimension,
1409 bool useSubtensor,
1410 float qScale,
1411 int32_t qOffset)
1412{
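    // Builds three inputs of shape { 1, 3, 2, 2 } and concatenates them along the requested
    // dimension; callers pass the matching output shape and supply the expected data themselves.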
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001413 TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001414
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001415 auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1416 {
1417 1.0f, 2.0f,
1418 3.0f, 4.0f,
1419 5.0f, 6.0f,
1420 7.0f, 8.0f,
1421 9.0f, 10.0f,
1422 11.0f, 12.0f
1423 },
1424 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001425
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001426 auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1427 {
1428 11.0f, 12.0f,
1429 13.0f, 14.0f,
1430 15.0f, 16.0f,
1431 17.0f, 18.0f,
1432 19.0f, 20.0f,
1433 21.0f, 22.0f
1434 },
1435 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001436
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001437 auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
1438 {
1439 21.0f, 22.0f,
1440 23.0f, 24.0f,
1441 25.0f, 26.0f,
1442 27.0f, 28.0f,
1443 29.0f, 30.0f,
1444 31.0f, 32.0f
1445 },
1446 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001447
1448 LayerTestResult<T, 4> result(outputTensorInfo);
1449
1450 std::vector<T> output;
1451 output.resize(outputTensorInfo.GetNumElements());
1452
1453 Concatenate<T>(workloadFactory,
1454 memoryManager,
1455 {inputTensorInfo, inputTensorInfo, inputTensorInfo},
1456 {input0.data(), input1.data(), input2.data()},
1457 outputTensorInfo,
1458 output.data(),
1459 dimension,
1460 useSubtensor);
1461
1462 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1463 return result;
1464}
1465
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001466template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001467LayerTestResult<T, 4> Concat4dDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001468 IWorkloadFactory& workloadFactory,
1469 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001470 float qScale,
1471 int32_t qOffset)
1472{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001473 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001474
1475 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1476 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1477
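    // Concatenating along dimension 0 stacks the inputs batch-wise, so the expected output is
    // simply input0 followed by input1 and input2.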
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001478 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1479 {
1480 1.0f, 2.0f,
1481 3.0f, 4.0f,
1482 5.0f, 6.0f,
1483 7.0f, 8.0f,
1484 9.0f, 10.0f,
1485 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001486
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001487 11.0f, 12.0f,
1488 13.0f, 14.0f,
1489 15.0f, 16.0f,
1490 17.0f, 18.0f,
1491 19.0f, 20.0f,
1492 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001493
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001494 21.0f, 22.0f,
1495 23.0f, 24.0f,
1496 25.0f, 26.0f,
1497 27.0f, 28.0f,
1498 29.0f, 30.0f,
1499 31.0f, 32.0f
1500 },
1501 qScale, qOffset));
1502
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001503 return result;
1504}
1505
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001506template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001507LayerTestResult<T, 4> Concat4dDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001508 IWorkloadFactory& workloadFactory,
1509 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001510 float qScale,
1511 int32_t qOffset)
1512{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001513 TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001514
1515 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1516 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1517
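    // Concatenating along dimension 1 appends the channels of the three single-batch inputs,
    // which flattens to the same element order as the dimension 0 case.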
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001518 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1519 {
1520 1.0f, 2.0f,
1521 3.0f, 4.0f,
1522 5.0f, 6.0f,
1523 7.0f, 8.0f,
1524 9.0f, 10.0f,
1525 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001526
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001527 11.0f, 12.0f,
1528 13.0f, 14.0f,
1529 15.0f, 16.0f,
1530 17.0f, 18.0f,
1531 19.0f, 20.0f,
1532 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001533
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001534 21.0f, 22.0f,
1535 23.0f, 24.0f,
1536 25.0f, 26.0f,
1537 27.0f, 28.0f,
1538 29.0f, 30.0f,
1539 31.0f, 32.0f
1540 },
1541 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001542
1543 return result;
1544}
1545
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001546template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001547LayerTestResult<T, 4> Concat4dDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001548 IWorkloadFactory& workloadFactory,
1549 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001550 float qScale,
1551 int32_t qOffset)
1552{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001553 TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001554
1555 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1556 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1557
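    // Concatenating along dimension 2 appends the inputs height-wise within each channel:
    // input0's two rows come first, then input1's, then input2's.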
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001558 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1559 {
1560 1.0f, 2.0f,
1561 3.0f, 4.0f,
1562 11.0f, 12.0f,
1563 13.0f, 14.0f,
1564 21.0f, 22.0f,
1565 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001566
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001567 5.0f, 6.0f,
1568 7.0f, 8.0f,
1569 15.0f, 16.0f,
1570 17.0f, 18.0f,
1571 25.0f, 26.0f,
1572 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001573
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001574 9.0f, 10.0f,
1575 11.0f, 12.0f,
1576 19.0f, 20.0f,
1577 21.0f, 22.0f,
1578 29.0f, 30.0f,
1579 31.0f, 32.0f
1580 },
1581 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001582
1583 return result;
1584}
1585
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001586template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001587LayerTestResult<T, 4> Concat4dDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001588 IWorkloadFactory& workloadFactory,
1589 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001590 float qScale,
1591 int32_t qOffset,
1592 bool useSubtensor)
1593{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001594 TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001595
1596 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1597 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1598
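    // Concatenating along dimension 3 appends the inputs width-wise, so each expected row is
    // input0's row followed by input1's and input2's rows.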
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001599 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1600 {
1601 1.0f, 2.0f,
1602 11.0f, 12.0f,
1603 21.0f, 22.0f,
1604 3.0f, 4.0f,
1605 13.0f, 14.0f,
1606 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001607
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001608 5.0f, 6.0f,
1609 15.0f, 16.0f,
1610 25.0f, 26.0f,
1611 7.0f, 8.0f,
1612 17.0f, 18.0f,
1613 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001614
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001615 9.0f, 10.0f,
1616 19.0f, 20.0f,
1617 29.0f, 30.0f,
1618 11.0f, 12.0f,
1619 21.0f, 22.0f,
1620 31.0f, 32.0f
1621 },
1622 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001623
1624 return result;
1625}
1626
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001627template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001628LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001629 IWorkloadFactory& workloadFactory,
1630 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001631 float qScale,
1632 int32_t qOffset)
1633{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001634 constexpr unsigned int dimension = 0u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001635
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001636 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1637 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1638 {
1639 1.0f, 2.0f,
1640 3.0f, 4.0f,
1641 5.0f, 6.0f,
1642 7.0f, 8.0f,
1643 9.0f, 10.0f,
1644 11.0f, 12.0f
1645 },
1646 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001647
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001648 TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001649
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001650 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1651 {
1652 11.0f, 12.0f,
1653 13.0f, 14.0f,
1654 15.0f, 16.0f,
1655 17.0f, 18.0f,
1656 19.0f, 20.0f,
1657 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001658
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001659 21.0f, 22.0f,
1660 23.0f, 24.0f,
1661 25.0f, 26.0f,
1662 27.0f, 28.0f,
1663 29.0f, 30.0f,
1664 31.0f, 32.0f
1665 },
1666 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001667
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001668 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001669
1670 LayerTestResult<T, 4> result(outputTensorInfo);
1671
1672 std::vector<T> output;
1673 output.resize(outputTensorInfo.GetNumElements());
1674 Concatenate<T>(workloadFactory,
1675 memoryManager,
1676 {inputTensorInfo0, inputTensorInfo1},
1677 {input0.data(), input1.data()},
1678 outputTensorInfo,
1679 output.data(),
1680 dimension,
1681 true);
1682
1683 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
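    // The expected output stacks input0's single batch ahead of input1's two batches.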
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001684 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1685 {
1686 1.0f, 2.0f,
1687 3.0f, 4.0f,
1688 5.0f, 6.0f,
1689 7.0f, 8.0f,
1690 9.0f, 10.0f,
1691 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001692
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001693 11.0f, 12.0f,
1694 13.0f, 14.0f,
1695 15.0f, 16.0f,
1696 17.0f, 18.0f,
1697 19.0f, 20.0f,
1698 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001699
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001700 21.0f, 22.0f,
1701 23.0f, 24.0f,
1702 25.0f, 26.0f,
1703 27.0f, 28.0f,
1704 29.0f, 30.0f,
1705 31.0f, 32.0f
1706 },
1707 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001708
1709 return result;
1710}
1711
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001712template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001713LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001714 IWorkloadFactory& workloadFactory,
1715 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001716 float qScale,
1717 int32_t qOffset)
1718{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001719 constexpr unsigned int dimension = 1u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001720
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001721 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1722 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1723 {
1724 1.0f, 2.0f,
1725 3.0f, 4.0f,
1726 5.0f, 6.0f,
1727 7.0f, 8.0f,
1728 9.0f, 10.0f,
1729 11.0f, 12.0f
1730 },
1731 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001732
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001733 TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001734
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001735 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1736 {
1737 11.0f, 12.0f,
1738 13.0f, 14.0f,
1739 15.0f, 16.0f,
1740 17.0f, 18.0f,
1741 },
1742 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001743
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001744 TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001745
1746 LayerTestResult<T, 4> result(outputTensorInfo);
1747
1748 std::vector<T> output;
1749 output.resize(outputTensorInfo.GetNumElements());
1750 Concatenate<T>(workloadFactory,
1751 memoryManager,
1752 {inputTensorInfo0, inputTensorInfo1},
1753 {input0.data(), input1.data()},
1754 outputTensorInfo,
1755 output.data(),
1756 dimension,
1757 true);
1758
1759 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
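    // The expected output holds input0's three channels followed by input1's two channels.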
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001760 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1761 {
1762 1.0f, 2.0f,
1763 3.0f, 4.0f,
1764 5.0f, 6.0f,
1765 7.0f, 8.0f,
1766 9.0f, 10.0f,
1767 11.0f, 12.0f,
1768 11.0f, 12.0f,
1769 13.0f, 14.0f,
1770 15.0f, 16.0f,
1771 17.0f, 18.0f
1772 },
1773 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001774
1775 return result;
1776}
1777
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001778template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001779LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001780 IWorkloadFactory& workloadFactory,
1781 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001782 float qScale,
1783 int32_t qOffset)
1784{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001785 constexpr unsigned int dimension = 2u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001786
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001787 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1788 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1789 {
1790 1.0f, 2.0f,
1791 3.0f, 4.0f,
1792 5.0f, 6.0f,
1793 7.0f, 8.0f,
1794 9.0f, 10.0f,
1795 11.0f, 12.0f
1796 },
1797 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001798
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001799 TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1800 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1801 {
1802 11.0f, 12.0f,
1803 13.0f, 14.0f,
1804 15.0f, 16.0f,
1805 17.0f, 18.0f,
1806 19.0f, 20.0f,
1807 21.0f, 22.0f,
1808 23.0f, 24.0f,
1809 25.0f, 26.0f,
1810 27.0f, 28.0f
1811 },
1812 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001813
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001814 TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001815 LayerTestResult<T, 4> result(outputTensorInfo);
1816
1817 std::vector<T> output;
1818 output.resize(outputTensorInfo.GetNumElements());
1819 Concatenate<T>(workloadFactory,
1820 memoryManager,
1821 {inputTensorInfo0, inputTensorInfo1},
1822 {input0.data(), input1.data()},
1823 outputTensorInfo,
1824 output.data(),
1825 dimension,
1826 true);
1827
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001828 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
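    // Within each channel, the expected output holds input0's two rows followed by input1's three rows.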
1829 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1830 {
1831 1.0f, 2.0f,
1832 3.0f, 4.0f,
1833 11.0f, 12.0f,
1834 13.0f, 14.0f,
1835 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001836
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001837 5.0f, 6.0f,
1838 7.0f, 8.0f,
1839 17.0f, 18.0f,
1840 19.0f, 20.0f,
1841 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001842
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001843 9.0f, 10.0f,
1844 11.0f, 12.0f,
1845 23.0f, 24.0f,
1846 25.0f, 26.0f,
1847 27.0f, 28.0f
1848 },
1849 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001850
1851 return result;
1852}
1853
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001854template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001855LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001856 IWorkloadFactory& workloadFactory,
1857 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001858 float qScale,
1859 int32_t qOffset,
1860 bool useSubtensor)
1861{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001862 constexpr unsigned int dimension = 3u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001863
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001864 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1865 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1866 {
1867 1.0f, 2.0f,
1868 3.0f, 4.0f,
1869 5.0f, 6.0f,
1870 7.0f, 8.0f,
1871 9.0f, 10.0f,
1872 11.0f, 12.0f
1873 },
1874 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001875
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001876 TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1877 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1878 {
1879 11.0f, 12.0f, 13.0f,
1880 14.0f, 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001881
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001882 17.0f, 18.0f, 19.0f,
1883 20.0f, 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001884
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001885 23.0f, 24.0f, 25.0f,
1886 26.0f, 27.0f, 28.0f
1887 },
1888 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001889
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001890 TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001891
1892 LayerTestResult<T, 4> result(outputTensorInfo);
1893
1894 std::vector<T> output;
1895 output.resize(outputTensorInfo.GetNumElements());
1896 Concatenate<T>(workloadFactory,
1897 memoryManager,
1898 {inputTensorInfo0, inputTensorInfo1},
1899 {input0.data(), input1.data()},
1900 outputTensorInfo,
1901 output.data(),
1902 dimension,
1903 useSubtensor);
1904
1905 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
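    // Each expected row is input0's two-element row followed by input1's three-element row.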
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001906 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1907 {
1908 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1909 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1910 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1911 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1912 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1913 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1914 },
1915 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001916
1917 return result;
1918}
1919
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001920template<DataType ArmnnType, typename T>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001921LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001922 IWorkloadFactory& workloadFactory,
1923 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001924 bool useSubtensor)
1925{
Jan Eilers8eb25602020-03-09 12:13:48 +00001926 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00001927
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001928 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001929 TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1930 TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1931 TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001932
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001933 std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001934
1935 // Quantized input1 tensor.
1936 const float inputScale1 = 0.5f;
1937 const int32_t inputOffset1 = 5;
1938
1939 auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1940 {
1941 1, 2, 3,
1942 4, 5, 6,
1943 7, 8, 9,
1944 10, 11, 12,
1945 13, 14, 15,
1946 16, 17, 18,
1947
1948 19, 20, 21,
1949 22, 23, 24,
1950 25, 26, 27,
1951 28, 29, 30,
1952 31, 32, 33,
1953 34, 35, 36
1954 }));
1955
1956    // Quantized input2 tensor.
1957 const float inputScale2 = 0.2f;
1958 const int32_t inputOffset2 = 10;
1959
1960 auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1961 {
1962 37, 38, 39,
1963 40, 41, 42,
1964 43, 44, 45,
1965 46, 47, 48,
1966 49, 50, 51,
1967 52, 53, 54
1968 }));
1969
1970 // Quantized output tensor.
1971 const float outputScale = 0.1f;
1972 const int32_t outputOffset = 20;
1973
1974 LayerTestResult<T, 3> ret(outputTensorInfo);
1975
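    // The expected values below are the inputs requantized to the output parameters:
    // real = (q - inputOffset) * inputScale, then qOut = real / outputScale + outputOffset.
    // For example, input1's first element 1 dequantizes to (1 - 5) * 0.5 = -2.0 and requantizes
    // to -2.0 / 0.1 + 20 = 0, while input2's first element 37 maps to (37 - 10) * 0.2 = 5.4 and
    // then to 5.4 / 0.1 + 20 = 74, matching the first row of the expected output.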
1976 ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1977 {
1978 0, 5, 74,
1979 10, 15, 76,
1980 20, 25, 78,
1981 30, 35, 80,
1982 40, 45, 82,
1983 50, 55, 84,
1984
1985 60, 65, 86,
1986 70, 75, 88,
1987 80, 85, 90,
1988 90, 95, 92,
1989 100, 105, 94,
1990 110, 115, 96,
1991
1992 120, 125, 98,
1993 130, 135, 100,
1994 140, 145, 102,
1995 150, 155, 104,
1996 160, 165, 106,
1997 170, 175, 108
1998 }));
1999
2000 outputTensorInfo.SetQuantizationScale(outputScale);
2001 outputTensorInfo.SetQuantizationOffset(outputOffset);
2002 inputTensorInfo1.SetQuantizationScale(inputScale1);
2003 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2004 inputTensorInfo2.SetQuantizationScale(inputScale2);
2005 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2006
2007 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002008 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002009
2010 std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002011 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
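    // input1 ({ 3, 6, 2 }) fills the first two positions of the innermost dimension, so input2's
    // view starts at offset 2; together the views describe a concatenation along dimension 2.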
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002012 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002013 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002014 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002015 bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002016 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002017 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002018 subTensorsSupported ?
2019 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2020 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2021
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002022 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002023 subTensorsSupported ?
2024 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2025 workloadFactory.CreateTensorHandle(inputTensorInfo2);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002026 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002027 ConcatQueueDescriptor data;
2028 OriginsDescriptor desc = CreateDescriptorForConcatenation(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002029 inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2030 data.m_Parameters = desc;
2031
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002032 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002033 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2034 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2035 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2036
2037 data.m_ViewOrigins.push_back(window1);
2038 data.m_ViewOrigins.push_back(window2);
2039
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002040 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002041
2042 inputHandle1->Allocate();
2043 inputHandle2->Allocate();
2044 outputHandle->Allocate();
2045
2046 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2047 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2048
2049 workload->PostAllocationConfigure();
2050 workload->Execute();
2051
2052 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2053
2054 return ret;
2055}
2056
2057//
2058// Explicit template specializations
2059//
2060
Derek Lambertif90c56d2020-01-10 17:14:08 +00002061template LayerTestResult<ResolveType<DataType::QAsymmU8>, 3>
2062ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002063 IWorkloadFactory& workloadFactory,
2064 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002065 bool useSubtensor);
2066
Derek Lambertif90c56d2020-01-10 17:14:08 +00002067template LayerTestResult<ResolveType<DataType::QSymmS16>, 3>
2068ConcatDifferentInputOutputQParamTest<DataType::QSymmS16>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002069 IWorkloadFactory& workloadFactory,
2070 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002071 bool useSubtensor);
2072
2073//
2074// Implementation functions
2075//
2076
2077LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002078 IWorkloadFactory& workloadFactory,
2079 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002080{
Jan Eilers8eb25602020-03-09 12:13:48 +00002081 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002082
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002083 unsigned int outputWidth = 3;
2084 unsigned int outputHeight = 6;
2085 unsigned int outputChannels = 3;
2086
2087 unsigned int inputWidth1 = 3;
2088 unsigned int inputHeight1 = 6;
2089 unsigned int inputChannels1 = 2;
2090
2091 unsigned int inputWidth2 = 3;
2092 unsigned int inputHeight2 = 6;
2093 unsigned int inputChannels2 = 1;
2094
2095    // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002096 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2097 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2098 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002099
2100 LayerTestResult<float,3> ret(outputTensorInfo);
2101
2102 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2103 {
2104 1.0f, 2.0f, 3.0f,
2105 4.0f, 5.0f, 6.0f,
2106 7.0f, 8.0f, 9.0f,
2107 10.0f, 11.0f, 12.0f,
2108 13.0f, 14.0f, 15.0f,
2109 16.0f, 17.0f, 18.0f,
2110
2111 19.0f, 20.0f, 21.0f,
2112 22.0f, 23.0f, 24.0f,
2113 25.0f, 26.0f, 27.0f,
2114 28.0f, 29.0f, 30.0f,
2115 31.0f, 32.0f, 33.0f,
2116 34.0f, 35.0f, 36.0f,
2117
2118 37.0f, 38.0f, 39.0f,
2119 40.0f, 41.0f, 42.0f,
2120 43.0f, 44.0f, 45.0f,
2121 46.0f, 47.0f, 48.0f,
2122 49.0f, 50.0f, 51.0f,
2123 52.0f, 53.0f, 54.0f,
2124 })
2125 );
2126
2127 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2128 {
2129 1.0f, 2.0f, 3.0f,
2130 4.0f, 5.0f, 6.0f,
2131 7.0f, 8.0f, 9.0f,
2132 10.0f, 11.0f, 12.0f,
2133 13.0f, 14.0f, 15.0f,
2134 16.0f, 17.0f, 18.0f,
2135
2136 19.0f, 20.0f, 21.0f,
2137 22.0f, 23.0f, 24.0f,
2138 25.0f, 26.0f, 27.0f,
2139 28.0f, 29.0f, 30.0f,
2140 31.0f, 32.0f, 33.0f,
2141 34.0f, 35.0f, 36.0f,
2142 })
2143 );
2144
2145 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2146 {
2147 37.0f, 38.0f, 39.0f,
2148 40.0f, 41.0f, 42.0f,
2149 43.0f, 44.0f, 45.0f,
2150 46.0f, 47.0f, 48.0f,
2151 49.0f, 50.0f, 51.0f,
2152 52.0f, 53.0f, 54.0f,
2153 })
2154 );
2155
2156 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002157 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002158
2159 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002160 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
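    // input1 occupies channels 0 and 1 of the output, so input2's view begins at channel 2.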
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002161 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002162 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002163
2164 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2165
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002166 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002167 subTensorsSupported ?
2168 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2169 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2170
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002171 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002172 subTensorsSupported ?
2173 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2174 workloadFactory.CreateTensorHandle(inputTensorInfo2);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002175 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002176 ConcatQueueDescriptor data;
2177 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002178 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2179 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2180 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2181
2182 data.m_ViewOrigins.push_back(window1);
2183 data.m_ViewOrigins.push_back(window2);
2184
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002185 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002186
2187 inputHandle1->Allocate();
2188 inputHandle2->Allocate();
2189 outputHandle->Allocate();
2190
2191 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2192 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2193
2194 workload->PostAllocationConfigure();
2195 workload->Execute();
2196
2197 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2198
2199 return ret;
2200}
2201
2202LayerTestResult<float, 1> Concat1dTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002203 IWorkloadFactory& workloadFactory,
2204 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002205{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002206 return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002207}
2208
2209LayerTestResult<float, 2> Concat2dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002210 IWorkloadFactory& workloadFactory,
2211 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002212{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002213 return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002214}
2215
2216LayerTestResult<float, 2> Concat2dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002217 IWorkloadFactory& workloadFactory,
2218 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002219{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002220 return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002221}
2222
2223LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002224 IWorkloadFactory& workloadFactory,
2225 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002226{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002227 return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002228}
2229
2230LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002231 IWorkloadFactory& workloadFactory,
2232 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002233{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002234 return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002235}
2236
2237LayerTestResult<float, 3> Concat3dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002238 IWorkloadFactory& workloadFactory,
2239 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002240{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002241 return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002242}
2243
2244LayerTestResult<float, 3> Concat3dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002245 IWorkloadFactory& workloadFactory,
2246 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002247{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002248 return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002249}
2250
2251LayerTestResult<float, 3> Concat3dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002252 IWorkloadFactory& workloadFactory,
2253 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002254 bool useSubtensor)
2255{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002256 return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002257}
2258
2259LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002260 IWorkloadFactory& workloadFactory,
2261 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002262{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002263 return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002264 workloadFactory, memoryManager, 0.0f, 0);
2265}
2266
2267LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002268 IWorkloadFactory& workloadFactory,
2269 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002270{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002271 return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002272}
2273
2274LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002275 IWorkloadFactory& workloadFactory,
2276 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002277 bool useSubtensor)
2278{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002279 return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002280 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2281}
2282
2283LayerTestResult<float, 4> Concat4dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002284 IWorkloadFactory& workloadFactory,
2285 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002286{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002287 return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002288}
2289
2290LayerTestResult<float, 4> Concat4dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002291 IWorkloadFactory& workloadFactory,
2292 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002293{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002294 return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002295}
2296
2297LayerTestResult<float, 4> Concat4dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002298 IWorkloadFactory& workloadFactory,
2299 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002300{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002301 return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002302}
2303
2304LayerTestResult<float, 4> Concat4dDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002305 IWorkloadFactory& workloadFactory,
2306 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002307 bool useSubtensor)
2308{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002309 return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002310}
2311
2312LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002313 IWorkloadFactory& workloadFactory,
2314 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002315{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002316 return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002317}
2318
2319LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002320 IWorkloadFactory& workloadFactory,
2321 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002322{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002323 return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002324 workloadFactory, memoryManager, 0.0f, 0);
2325}
2326
2327LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002328 IWorkloadFactory& workloadFactory,
2329 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002330{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002331 return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002332}
2333
2334LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002335 IWorkloadFactory& workloadFactory,
2336 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002337 bool useSubtensor)
2338{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002339 return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002340 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2341}
2342
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002343LayerTestResult<Half, 3> ConcatFloat16Test(
2344 IWorkloadFactory& workloadFactory,
2345 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matthew Jackson9bff1442019-09-12 09:08:23 +01002346{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002347 return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
Matthew Jackson9bff1442019-09-12 09:08:23 +01002348}
2349
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00002350LayerTestResult<BFloat16, 3> ConcatBFloat16Test(
2351 IWorkloadFactory& workloadFactory,
2352 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2353{
2354 return Concat3dDim1TestImpl<DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
2355}
2356
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002357LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002358 IWorkloadFactory& workloadFactory,
2359 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002360{
Jan Eilers8eb25602020-03-09 12:13:48 +00002361 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002362
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002363 unsigned int outputWidth = 3;
2364 unsigned int outputHeight = 6;
2365 unsigned int outputChannels = 3;
2366
2367 unsigned int inputWidth1 = 3;
2368 unsigned int inputHeight1 = 6;
2369 unsigned int inputChannels1 = 2;
2370
2371 unsigned int inputWidth2 = 3;
2372 unsigned int inputHeight2 = 6;
2373 unsigned int inputChannels2 = 1;
2374
2375 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002376 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2377 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2378 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002379
2380 // Quantized input1 tensor. Range [-3, 1]
2381 const float inputScale1 = 0.015686f;
2382 const int32_t inputOffset1 = 192;
2383
2384 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2385 {
2386 1, 2, 3,
2387 4, 5, 6,
2388 7, 8, 9,
2389 10, 11, 12,
2390 13, 14, 15,
2391 16, 17, 18,
2392
2393 19, 20, 21,
2394 22, 23, 24,
2395 25, 26, 27,
2396 28, 29, 30,
2397 31, 32, 33,
2398 34, 35, 36,
2399 })
2400 );
2401
2402    // Quantized input2 tensor. Range [-1, 4]
2403 const float inputScale2 = 0.019608f;
2404 const int32_t inputOffset2 = 50;
2405
2406 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2407 {
2408 37, 38, 39,
2409 40, 41, 42,
2410 43, 44, 45,
2411 46, 47, 48,
2412 49, 50, 51,
2413 52, 53, 54,
2414 })
2415 );
2416
2417    // The output has the same quantization parameters as input1,
2418    // so only input2 needs to be requantized.
2419 const float outputScale = 0.015686f;
2420 const int32_t outputOffset = 192;
2421
2422 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2423
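    // input1 already uses the output quantization parameters, so its values pass through unchanged.
    // input2 is requantized: for example, 37 dequantizes to (37 - 50) * 0.019608 ≈ -0.255 and
    // requantizes to -0.255 / 0.015686 + 192 ≈ 175.75, stored as 176 in the expected output below.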
2424 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2425 {
2426 1, 2, 3,
2427 4, 5, 6,
2428 7, 8, 9,
2429 10, 11, 12,
2430 13, 14, 15,
2431 16, 17, 18,
2432
2433 19, 20, 21,
2434 22, 23, 24,
2435 25, 26, 27,
2436 28, 29, 30,
2437 31, 32, 33,
2438 34, 35, 36,
2439
2440 176, 177, 178,
2441 179, 181, 182,
2442 183, 184, 186,
2443 187, 188, 189,
2444 191, 192, 193,
2445 195, 196, 197,
2446 })
2447 );
2448
2449 outputTensorInfo.SetQuantizationScale(outputScale);
2450 outputTensorInfo.SetQuantizationOffset(outputOffset);
2451 inputTensorInfo1.SetQuantizationScale(inputScale1);
2452 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2453 inputTensorInfo2.SetQuantizationScale(inputScale2);
2454 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2455
2456 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002457 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002458
2459 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002460 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002461 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002462 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002463
2464 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2465
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002466 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002467 subTensorsSupported ?
2468 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2469 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2470
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002471 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002472 subTensorsSupported ?
2473 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2474 workloadFactory.CreateTensorHandle(inputTensorInfo2);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002475 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002476 ConcatQueueDescriptor data;
2477 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002478 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2479 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2480 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2481
2482 data.m_ViewOrigins.push_back(window1);
2483 data.m_ViewOrigins.push_back(window2);
2484
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002485 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002486
2487 inputHandle1->Allocate();
2488 inputHandle2->Allocate();
2489 outputHandle->Allocate();
2490
2491 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2492 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2493
2494 workload->PostAllocationConfigure();
2495 workload->Execute();
2496
2497 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2498
2499 return ret;
2500}
2501
2502LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002503 IWorkloadFactory& workloadFactory,
2504 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002505{
Jan Eilers8eb25602020-03-09 12:13:48 +00002506 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002507
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002508 unsigned int outputWidth = 3;
2509 unsigned int outputHeight = 6;
2510 unsigned int outputChannels = 3;
2511
2512 unsigned int inputWidth1 = 3;
2513 unsigned int inputHeight1 = 6;
2514 unsigned int inputChannels1 = 2;
2515
2516 unsigned int inputWidth2 = 3;
2517 unsigned int inputHeight2 = 6;
2518 unsigned int inputChannels2 = 1;
2519
2520 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002521 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QAsymmU8);
2522 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QAsymmU8);
2523 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QAsymmU8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002524
2525 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2526 const float scale = 0.13497836f;
2527 const int32_t offset = -7;
2528
2529 outputTensorInfo.SetQuantizationScale(scale);
2530 outputTensorInfo.SetQuantizationOffset(offset);
2531 inputTensorInfo1.SetQuantizationScale(scale);
2532 inputTensorInfo1.SetQuantizationOffset(offset);
2533 inputTensorInfo2.SetQuantizationScale(scale);
2534 inputTensorInfo2.SetQuantizationOffset(offset);
2535
2536 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2537
2538 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2539 {
2540 1, 2, 3,
2541 4, 5, 6,
2542 7, 8, 9,
2543 10, 11, 12,
2544 13, 14, 15,
2545 16, 17, 18,
2546
2547 19, 20, 21,
2548 22, 23, 24,
2549 25, 26, 27,
2550 28, 29, 30,
2551 31, 32, 33,
2552 34, 35, 36,
2553
2554 37, 38, 39,
2555 40, 41, 42,
2556 43, 44, 45,
2557 46, 47, 48,
2558 49, 50, 51,
2559 52, 53, 54,
2560 })
2561 );
2562
2563 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2564 {
2565 1, 2, 3,
2566 4, 5, 6,
2567 7, 8, 9,
2568 10, 11, 12,
2569 13, 14, 15,
2570 16, 17, 18,
2571
2572 19, 20, 21,
2573 22, 23, 24,
2574 25, 26, 27,
2575 28, 29, 30,
2576 31, 32, 33,
2577 34, 35, 36,
2578 })
2579 );
2580
2581 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2582 {
2583 37, 38, 39,
2584 40, 41, 42,
2585 43, 44, 45,
2586 46, 47, 48,
2587 49, 50, 51,
2588 52, 53, 54,
2589 })
2590 );
2591
2592 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002593 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002594
2595 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002596 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002597
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002598 ARMNN_NO_DEPRECATE_WARN_BEGIN
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002599 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002600
2601 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2602
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002603 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002604 subTensorsSupported ?
2605 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2606 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2607
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002608 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002609 subTensorsSupported ?
2610 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2611 workloadFactory.CreateTensorHandle(inputTensorInfo2);
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002612 ARMNN_NO_DEPRECATE_WARN_END
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002613
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002614 ConcatQueueDescriptor data;
2615 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002616 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2617 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2618 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2619
2620 data.m_ViewOrigins.push_back(window1);
2621 data.m_ViewOrigins.push_back(window2);
2622
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002623 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002624
2625 inputHandle1->Allocate();
2626 inputHandle2->Allocate();
2627 outputHandle->Allocate();
2628
2629 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2630 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2631
2632 workload->PostAllocationConfigure();
2633 workload->Execute();
2634
2635 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2636
2637 return ret;
2638}
2639
2640LayerTestResult<uint16_t, 3> ConcatUint16Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002641 IWorkloadFactory& workloadFactory,
2642 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002643{
Jan Eilers8eb25602020-03-09 12:13:48 +00002644 IgnoreUnused(memoryManager);
Derek Lambertic374ff02019-12-10 21:57:35 +00002645
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002646 unsigned int outputWidth = 3;
2647 unsigned int outputHeight = 6;
2648 unsigned int outputChannels = 3;
2649
2650 unsigned int inputWidth1 = 3;
2651 unsigned int inputHeight1 = 6;
2652 unsigned int inputChannels1 = 2;
2653
2654 unsigned int inputWidth2 = 3;
2655 unsigned int inputHeight2 = 6;
2656 unsigned int inputChannels2 = 1;
2657
2658 // Defines the tensor descriptors.
Derek Lambertif90c56d2020-01-10 17:14:08 +00002659 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QSymmS16);
2660 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QSymmS16);
2661 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QSymmS16);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002662
2663 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2664 const float scale = 0.13497836f;
2665 const int32_t offset = -7;
2666
2667 outputTensorInfo.SetQuantizationScale(scale);
2668 outputTensorInfo.SetQuantizationOffset(offset);
2669 inputTensorInfo1.SetQuantizationScale(scale);
2670 inputTensorInfo1.SetQuantizationOffset(offset);
2671 inputTensorInfo2.SetQuantizationScale(scale);
2672 inputTensorInfo2.SetQuantizationOffset(offset);
2673
2674 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2675
2676 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2677 {
2678 1, 2, 3,
2679 4, 5, 6,
2680 7, 8, 9,
2681 10, 11, 12,
2682 13, 14, 15,
2683 16, 17, 18,
2684
2685 19, 20, 21,
2686 22, 23, 24,
2687 25, 26, 27,
2688 28, 29, 30,
2689 31, 32, 33,
2690 34, 35, 36,
2691
2692 37, 38, 39,
2693 40, 41, 42,
2694 43, 44, 45,
2695 46, 47, 48,
2696 49, 50, 51,
2697 52, 53, 54,
2698 }));
2699
2700 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2701 {
2702 1, 2, 3,
2703 4, 5, 6,
2704 7, 8, 9,
2705 10, 11, 12,
2706 13, 14, 15,
2707 16, 17, 18,
2708
2709 19, 20, 21,
2710 22, 23, 24,
2711 25, 26, 27,
2712 28, 29, 30,
2713 31, 32, 33,
2714 34, 35, 36,
2715 }));
2716
2717 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2718 {
2719 37, 38, 39,
2720 40, 41, 42,
2721 43, 44, 45,
2722 46, 47, 48,
2723 49, 50, 51,
2724 52, 53, 54,
2725 }));
2726
2727 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002728 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002729
2730 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002731 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002732
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

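    // If the backend supports sub-tensors, create each input handle as a view into the output
    // tensor at its view origin, so the inputs can be written directly into their region of the output.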
    std::unique_ptr<ITensorHandle> inputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo1);

    std::unique_ptr<ITensorHandle> inputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(inputTensorInfo2);
    ARMNN_NO_DEPRECATE_WARN_END

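    // Describe the Concat workload: two input views, one output, and the view origins
    // that position each input within the output.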
    ConcatQueueDescriptor data;
    WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

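    // Create and run the Concat workload, then copy the result back into ret.output so the
    // caller can compare it against ret.outputExpected.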
    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());

    return ret;
}

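// The tests below forward to the templated Concat test implementations with QAsymmU8 data,
// using an arbitrary quantization scale of 0.5 and offset of -1.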
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat1dTestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2DiffInputDimsTestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim0TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim1TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim2TestImpl<DataType::QAsymmU8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
    return Concat4dDim3TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim0TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim1TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim2TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDiffShapeDim3TestImpl<DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}