//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConcatTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <armnnUtils/Permute.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

using namespace armnn;
using namespace armnnUtils;

//
// Helper functions and templates
//

OriginsDescriptor CreateDescriptorForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    std::vector<TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
}

//
// Concat is only supported for the N and C dimensions for NCHW and for the innermost dimension.
// In case of <4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one or the innermost dimension.
//

bool NeedPermuteForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions, and sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
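
//
// Illustrative cases for the condition above (a sketch derived from the expression in
// NeedPermuteForConcat, not from backend documentation):
//   - 1d or 2d inputs:                 nDimensions < 3, so a permute is always required
//   - 3d inputs, concatDim == 1:       (3 - 1) == 2, so a permute is required
//   - 3d inputs, concatDim == 0 or 2:  the concatenation runs along the outermost or
//                                      innermost dimension, so no permute is needed
//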

TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return TensorShape(3u, &newDims[0]);
}

void Generate3dPermuteVectorForConcat(
    unsigned int numDimensions,
    unsigned int & concatDim,
    std::pair<PermutationVector, PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({1, 2, 0});
        PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({2, 0, 1});
        PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
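
// A minimal sketch of how the helper above is typically driven (illustrative only,
// not part of the tests; it assumes a 2d concatenation along dimension 1):
//
//     unsigned int concatDim = 1;
//     std::pair<PermutationVector, PermutationVector> permutations;
//     Generate3dPermuteVectorForConcat(2, concatDim, permutations);
//     // concatDim is now 0, permutations.first  == {1, 2, 0}
//     //                     permutations.second == {2, 0, 1}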

template<typename T> void PermuteTensorData(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const PermutationVector& mappings,
    TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}

//
// Permute the input tensors so we can do a supported concatenation.
// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
// dimensions of size 1 at the front. Finally, this function reports what the
// output shape of the permuted, concatenated tensor is going to be.
//
template<typename T> void PermuteInputsForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    PermutationVector & permuteVector,
    unsigned int & concatDim,
    TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
        "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const PermutationVector identity({0, 1, 2});

    std::pair<PermutationVector, PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                "All inputs must have the same number of dimensions");
        }

        TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
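
// Worked example for the function above (a sketch, assuming armnnUtils::Permuted places
// source dimension i at destination mappings[i]): concatenating two { 2, 3 } inputs along
// dimension 1 expands each input to { 1, 2, 3 } and forward-permutes it to { 3, 1, 2 },
// rewrites concatDim to 0, stores the reverse mapping {2, 0, 1} in permuteVector, and
// rewrites outputTensorInfo from { 2, 6 } to { 6, 1, 2 } so that the concatenation can run
// along the outermost dimension.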

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the output
// of the concatenation back so we can check it against an expected output.
//
template <typename T> void PermuteOutputForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo & tensorInfo,
    const PermutationVector & permuteVector,
    std::unique_ptr<ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template<typename T> void Concatenate(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Saves a copy of the parameters which we might need to change.
    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    TensorInfo outputTensorInfo = outputTensorInfoOrig;

    PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    ConcatQueueDescriptor queueDescriptor;
    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }

        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
            std::unique_ptr<ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);

            inputHandles.emplace_back(std::move(inputHandle));
        }
    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}

//
// Implementation templates
//

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));

    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            7.0f, 8.0f, 9.0f,

            // Batch 1
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,

            // Batch 2
            7.0f, 8.0f, 9.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            9.0f,

            // Batch 1
            18.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 1, Channel 0
            25.0f, 26.0f,

            // Batch 1, Channel 1
            27.0f, 28.0f,

            // Batch 1, Channel 2
            29.0f, 30.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            13.0f, 14.0f,

            // Batch 0, Channel 7
            15.0f, 16.0f,

            // Batch 0, Channel 8
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 4
            27.0f, 28.0f,

            // Batch 1, Channel 5
            29.0f, 30.0f,

            // Batch 1, Channel 6
            31.0f, 32.0f,

            // Batch 1, Channel 7
            33.0f, 34.0f,

            // Batch 1, Channel 8
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            25.0f, 26.0f,

            // Batch 0, Channel 1
            27.0f, 28.0f,

            // Batch 0, Channel 2
            29.0f, 30.0f,

            // Batch 1, Channel 0
            13.0f, 14.0f,

            // Batch 1, Channel 1
            15.0f, 16.0f,

            // Batch 1, Channel 2
            17.0f, 18.0f,

            // Batch 2, Channel 0
            31.0f, 32.0f,

            // Batch 2, Channel 1
            33.0f, 34.0f,

            // Batch 2, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 0, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 0
            27.0f, 28.0f,

            // Batch 1, Channel 1
            29.0f, 30.0f,

            // Batch 1, Channel 2
            13.0f, 14.0f,

            // Batch 1, Channel 3
            15.0f, 16.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            25.0f, 26.0f,

            // Batch 0, Channel 7
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            27.0f, 28.0f,

            // Batch 1, Channel 4
            29.0f, 30.0f,

            // Batch 1, Channel 5
            13.0f, 14.0f,

            // Batch 1, Channel 6
            15.0f, 16.0f,

            // Batch 1, Channel 7
            31.0f, 32.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f,

            // Batch 0, Channel 1
            9.0f,

            // Batch 0, Channel 2
            11.0f,

            // Batch 1, Channel 0
            25.0f,

            // Batch 1, Channel 1
            27.0f,

            // Batch 1, Channel 2
            29.0f
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f,
            3.0f, 4.0f,
            5.0f, 6.0f,
            7.0f, 8.0f,
            9.0f, 10.0f,
            11.0f, 12.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            11.0f, 12.0f,
            13.0f, 14.0f,
            15.0f, 16.0f,
            17.0f, 18.0f,
            19.0f, 20.0f,
            21.0f, 22.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            21.0f, 22.0f,
            23.0f, 24.0f,
            25.0f, 26.0f,
            27.0f, 28.0f,
            29.0f, 30.0f,
            31.0f, 32.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}

Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001460template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001461LayerTestResult<T, 4> Concat4dDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001462 IWorkloadFactory& workloadFactory,
1463 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001464 float qScale,
1465 int32_t qOffset)
1466{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001467 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001468
1469 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1470 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1471
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001472 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1473 {
1474 1.0f, 2.0f,
1475 3.0f, 4.0f,
1476 5.0f, 6.0f,
1477 7.0f, 8.0f,
1478 9.0f, 10.0f,
1479 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001480
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001481 11.0f, 12.0f,
1482 13.0f, 14.0f,
1483 15.0f, 16.0f,
1484 17.0f, 18.0f,
1485 19.0f, 20.0f,
1486 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001487
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001488 21.0f, 22.0f,
1489 23.0f, 24.0f,
1490 25.0f, 26.0f,
1491 27.0f, 28.0f,
1492 29.0f, 30.0f,
1493 31.0f, 32.0f
1494 },
1495 qScale, qOffset));
1496
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001497 return result;
1498}
1499
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001500template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001501LayerTestResult<T, 4> Concat4dDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001502 IWorkloadFactory& workloadFactory,
1503 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001504 float qScale,
1505 int32_t qOffset)
1506{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001507 TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001508
1509 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1510 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1511
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001512 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1513 {
1514 1.0f, 2.0f,
1515 3.0f, 4.0f,
1516 5.0f, 6.0f,
1517 7.0f, 8.0f,
1518 9.0f, 10.0f,
1519 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001520
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001521 11.0f, 12.0f,
1522 13.0f, 14.0f,
1523 15.0f, 16.0f,
1524 17.0f, 18.0f,
1525 19.0f, 20.0f,
1526 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001527
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001528 21.0f, 22.0f,
1529 23.0f, 24.0f,
1530 25.0f, 26.0f,
1531 27.0f, 28.0f,
1532 29.0f, 30.0f,
1533 31.0f, 32.0f
1534 },
1535 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001536
1537 return result;
1538}
1539
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001540template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001541LayerTestResult<T, 4> Concat4dDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001542 IWorkloadFactory& workloadFactory,
1543 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001544 float qScale,
1545 int32_t qOffset)
1546{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001547 TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001548
1549 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1550 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1551
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001552 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1553 {
1554 1.0f, 2.0f,
1555 3.0f, 4.0f,
1556 11.0f, 12.0f,
1557 13.0f, 14.0f,
1558 21.0f, 22.0f,
1559 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001560
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001561 5.0f, 6.0f,
1562 7.0f, 8.0f,
1563 15.0f, 16.0f,
1564 17.0f, 18.0f,
1565 25.0f, 26.0f,
1566 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001567
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001568 9.0f, 10.0f,
1569 11.0f, 12.0f,
1570 19.0f, 20.0f,
1571 21.0f, 22.0f,
1572 29.0f, 30.0f,
1573 31.0f, 32.0f
1574 },
1575 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001576
1577 return result;
1578}
1579
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001580template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001581LayerTestResult<T, 4> Concat4dDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001582 IWorkloadFactory& workloadFactory,
1583 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001584 float qScale,
1585 int32_t qOffset,
1586 bool useSubtensor)
1587{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001588 TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001589
1590 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1591 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1592
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001593 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1594 {
1595 1.0f, 2.0f,
1596 11.0f, 12.0f,
1597 21.0f, 22.0f,
1598 3.0f, 4.0f,
1599 13.0f, 14.0f,
1600 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001601
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001602 5.0f, 6.0f,
1603 15.0f, 16.0f,
1604 25.0f, 26.0f,
1605 7.0f, 8.0f,
1606 17.0f, 18.0f,
1607 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001608
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001609 9.0f, 10.0f,
1610 19.0f, 20.0f,
1611 29.0f, 30.0f,
1612 11.0f, 12.0f,
1613 21.0f, 22.0f,
1614 31.0f, 32.0f
1615 },
1616 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001617
1618 return result;
1619}
1620
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001621template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001622LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001623 IWorkloadFactory& workloadFactory,
1624 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001625 float qScale,
1626 int32_t qOffset)
1627{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001628 constexpr unsigned int dimension = 0u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001629
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001630 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1631 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1632 {
1633 1.0f, 2.0f,
1634 3.0f, 4.0f,
1635 5.0f, 6.0f,
1636 7.0f, 8.0f,
1637 9.0f, 10.0f,
1638 11.0f, 12.0f
1639 },
1640 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001641
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001642 TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001643
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001644 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1645 {
1646 11.0f, 12.0f,
1647 13.0f, 14.0f,
1648 15.0f, 16.0f,
1649 17.0f, 18.0f,
1650 19.0f, 20.0f,
1651 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001652
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001653 21.0f, 22.0f,
1654 23.0f, 24.0f,
1655 25.0f, 26.0f,
1656 27.0f, 28.0f,
1657 29.0f, 30.0f,
1658 31.0f, 32.0f
1659 },
1660 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001661
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001662 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001663
1664 LayerTestResult<T, 4> result(outputTensorInfo);
1665
1666 std::vector<T> output;
1667 output.resize(outputTensorInfo.GetNumElements());
1668 Concatenate<T>(workloadFactory,
1669 memoryManager,
1670 {inputTensorInfo0, inputTensorInfo1},
1671 {input0.data(), input1.data()},
1672 outputTensorInfo,
1673 output.data(),
1674 dimension,
1675 true);
1676
1677 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001678 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1679 {
1680 1.0f, 2.0f,
1681 3.0f, 4.0f,
1682 5.0f, 6.0f,
1683 7.0f, 8.0f,
1684 9.0f, 10.0f,
1685 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001686
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001687 11.0f, 12.0f,
1688 13.0f, 14.0f,
1689 15.0f, 16.0f,
1690 17.0f, 18.0f,
1691 19.0f, 20.0f,
1692 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001693
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001694 21.0f, 22.0f,
1695 23.0f, 24.0f,
1696 25.0f, 26.0f,
1697 27.0f, 28.0f,
1698 29.0f, 30.0f,
1699 31.0f, 32.0f
1700 },
1701 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001702
1703 return result;
1704}
1705
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001706template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001707LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001708 IWorkloadFactory& workloadFactory,
1709 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001710 float qScale,
1711 int32_t qOffset)
1712{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001713 constexpr unsigned int dimension = 1u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001714
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001715 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1716 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1717 {
1718 1.0f, 2.0f,
1719 3.0f, 4.0f,
1720 5.0f, 6.0f,
1721 7.0f, 8.0f,
1722 9.0f, 10.0f,
1723 11.0f, 12.0f
1724 },
1725 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001726
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001727 TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001728
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001729 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1730 {
1731 11.0f, 12.0f,
1732 13.0f, 14.0f,
1733 15.0f, 16.0f,
1734 17.0f, 18.0f,
1735 },
1736 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001737
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001738 TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001739
1740 LayerTestResult<T, 4> result(outputTensorInfo);
1741
1742 std::vector<T> output;
1743 output.resize(outputTensorInfo.GetNumElements());
1744 Concatenate<T>(workloadFactory,
1745 memoryManager,
1746 {inputTensorInfo0, inputTensorInfo1},
1747 {input0.data(), input1.data()},
1748 outputTensorInfo,
1749 output.data(),
1750 dimension,
1751 true);
1752
1753 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001754 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1755 {
1756 1.0f, 2.0f,
1757 3.0f, 4.0f,
1758 5.0f, 6.0f,
1759 7.0f, 8.0f,
1760 9.0f, 10.0f,
1761 11.0f, 12.0f,
1762 11.0f, 12.0f,
1763 13.0f, 14.0f,
1764 15.0f, 16.0f,
1765 17.0f, 18.0f
1766 },
1767 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001768
1769 return result;
1770}
1771
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001772template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001773LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001774 IWorkloadFactory& workloadFactory,
1775 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001776 float qScale,
1777 int32_t qOffset)
1778{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001779 constexpr unsigned int dimension = 2u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001780
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001781 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1782 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1783 {
1784 1.0f, 2.0f,
1785 3.0f, 4.0f,
1786 5.0f, 6.0f,
1787 7.0f, 8.0f,
1788 9.0f, 10.0f,
1789 11.0f, 12.0f
1790 },
1791 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001792
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001793 TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1794 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1795 {
1796 11.0f, 12.0f,
1797 13.0f, 14.0f,
1798 15.0f, 16.0f,
1799 17.0f, 18.0f,
1800 19.0f, 20.0f,
1801 21.0f, 22.0f,
1802 23.0f, 24.0f,
1803 25.0f, 26.0f,
1804 27.0f, 28.0f
1805 },
1806 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001807
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001808 TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001809 LayerTestResult<T, 4> result(outputTensorInfo);
1810
1811 std::vector<T> output;
1812 output.resize(outputTensorInfo.GetNumElements());
1813 Concatenate<T>(workloadFactory,
1814 memoryManager,
1815 {inputTensorInfo0, inputTensorInfo1},
1816 {input0.data(), input1.data()},
1817 outputTensorInfo,
1818 output.data(),
1819 dimension,
1820 true);
1821
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001822 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1823 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1824 {
1825 1.0f, 2.0f,
1826 3.0f, 4.0f,
1827 11.0f, 12.0f,
1828 13.0f, 14.0f,
1829 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001830
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001831 5.0f, 6.0f,
1832 7.0f, 8.0f,
1833 17.0f, 18.0f,
1834 19.0f, 20.0f,
1835 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001836
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001837 9.0f, 10.0f,
1838 11.0f, 12.0f,
1839 23.0f, 24.0f,
1840 25.0f, 26.0f,
1841 27.0f, 28.0f
1842 },
1843 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001844
1845 return result;
1846}
1847
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001848template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001849LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001850 IWorkloadFactory& workloadFactory,
1851 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001852 float qScale,
1853 int32_t qOffset,
1854 bool useSubtensor)
1855{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001856 constexpr unsigned int dimension = 3u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001857
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001858 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1859 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1860 {
1861 1.0f, 2.0f,
1862 3.0f, 4.0f,
1863 5.0f, 6.0f,
1864 7.0f, 8.0f,
1865 9.0f, 10.0f,
1866 11.0f, 12.0f
1867 },
1868 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001869
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001870 TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1871 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1872 {
1873 11.0f, 12.0f, 13.0f,
1874 14.0f, 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001875
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001876 17.0f, 18.0f, 19.0f,
1877 20.0f, 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001878
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001879 23.0f, 24.0f, 25.0f,
1880 26.0f, 27.0f, 28.0f
1881 },
1882 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001883
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001884 TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001885
1886 LayerTestResult<T, 4> result(outputTensorInfo);
1887
1888 std::vector<T> output;
1889 output.resize(outputTensorInfo.GetNumElements());
1890 Concatenate<T>(workloadFactory,
1891 memoryManager,
1892 {inputTensorInfo0, inputTensorInfo1},
1893 {input0.data(), input1.data()},
1894 outputTensorInfo,
1895 output.data(),
1896 dimension,
1897 useSubtensor);
1898
1899 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001900 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1901 {
1902 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1903 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1904 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1905 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1906 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1907 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1908 },
1909 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001910
1911 return result;
1912}
1913
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001914template<DataType ArmnnType, typename T>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001915LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001916 IWorkloadFactory& workloadFactory,
1917 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001918 bool useSubtensor)
1919{
1920 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001921 TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1922 TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1923 TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001924
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001925 std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001926
1927 // Quantized input1 tensor.
1928 const float inputScale1 = 0.5f;
1929 const int32_t inputOffset1 = 5;
1930
1931 auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1932 {
1933 1, 2, 3,
1934 4, 5, 6,
1935 7, 8, 9,
1936 10, 11, 12,
1937 13, 14, 15,
1938 16, 17, 18,
1939
1940 19, 20, 21,
1941 22, 23, 24,
1942 25, 26, 27,
1943 28, 29, 30,
1944 31, 32, 33,
1945 34, 35, 36
1946 }));
1947
 1948 // Quantized input2 tensor.
1949 const float inputScale2 = 0.2f;
1950 const int32_t inputOffset2 = 10;
1951
1952 auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1953 {
1954 37, 38, 39,
1955 40, 41, 42,
1956 43, 44, 45,
1957 46, 47, 48,
1958 49, 50, 51,
1959 52, 53, 54
1960 }));
1961
1962 // Quantized output tensor.
1963 const float outputScale = 0.1f;
1964 const int32_t outputOffset = 20;
1965
1966 LayerTestResult<T, 3> ret(outputTensorInfo);
1967
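    // The expected values below follow from dequantizing each input with its own (scale, offset)
    // and requantizing with the output parameters: for example, input1 value 1 maps to
    // 0.5f * (1 - 5) = -2.0f and then to -2.0f / 0.1f + 20 = 0, while input2 value 37 maps to
    // 0.2f * (37 - 10) = 5.4f and then to 5.4f / 0.1f + 20 = 74.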
1968 ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1969 {
1970 0, 5, 74,
1971 10, 15, 76,
1972 20, 25, 78,
1973 30, 35, 80,
1974 40, 45, 82,
1975 50, 55, 84,
1976
1977 60, 65, 86,
1978 70, 75, 88,
1979 80, 85, 90,
1980 90, 95, 92,
1981 100, 105, 94,
1982 110, 115, 96,
1983
1984 120, 125, 98,
1985 130, 135, 100,
1986 140, 145, 102,
1987 150, 155, 104,
1988 160, 165, 106,
1989 170, 175, 108
1990 }));
1991
1992 outputTensorInfo.SetQuantizationScale(outputScale);
1993 outputTensorInfo.SetQuantizationOffset(outputOffset);
1994 inputTensorInfo1.SetQuantizationScale(inputScale1);
1995 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1996 inputTensorInfo2.SetQuantizationScale(inputScale2);
1997 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
1998
1999 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002000 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002001
2002 std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002003 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002004
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002005 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002006
2007 bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2008
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002009 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002010 subTensorsSupported ?
2011 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2012 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2013
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002014 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002015 subTensorsSupported ?
2016 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2017 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2018
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002019 ConcatQueueDescriptor data;
2020 OriginsDescriptor desc = CreateDescriptorForConcatenation(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002021 inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2022 data.m_Parameters = desc;
2023
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002024 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002025 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2026 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2027 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2028
2029 data.m_ViewOrigins.push_back(window1);
2030 data.m_ViewOrigins.push_back(window2);
2031
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002032 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002033
2034 inputHandle1->Allocate();
2035 inputHandle2->Allocate();
2036 outputHandle->Allocate();
2037
2038 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2039 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2040
2041 workload->PostAllocationConfigure();
2042 workload->Execute();
2043
2044 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2045
2046 return ret;
2047}
2048
2049//
2050// Explicit template specializations
2051//
2052
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002053template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
2054ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
2055 IWorkloadFactory& workloadFactory,
2056 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002057 bool useSubtensor);
2058
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002059template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
2060ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
2061 IWorkloadFactory& workloadFactory,
2062 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002063 bool useSubtensor);
2064
2065//
2066// Implementation functions
2067//
2068
2069LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002070 IWorkloadFactory& workloadFactory,
2071 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002072{
2073 unsigned int outputWidth = 3;
2074 unsigned int outputHeight = 6;
2075 unsigned int outputChannels = 3;
2076
2077 unsigned int inputWidth1 = 3;
2078 unsigned int inputHeight1 = 6;
2079 unsigned int inputChannels1 = 2;
2080
2081 unsigned int inputWidth2 = 3;
2082 unsigned int inputHeight2 = 6;
2083 unsigned int inputChannels2 = 1;
2084
2085 // Define the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002086 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2087 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2088 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002089
2090 LayerTestResult<float,3> ret(outputTensorInfo);
2091
2092 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2093 {
2094 1.0f, 2.0f, 3.0f,
2095 4.0f, 5.0f, 6.0f,
2096 7.0f, 8.0f, 9.0f,
2097 10.0f, 11.0f, 12.0f,
2098 13.0f, 14.0f, 15.0f,
2099 16.0f, 17.0f, 18.0f,
2100
2101 19.0f, 20.0f, 21.0f,
2102 22.0f, 23.0f, 24.0f,
2103 25.0f, 26.0f, 27.0f,
2104 28.0f, 29.0f, 30.0f,
2105 31.0f, 32.0f, 33.0f,
2106 34.0f, 35.0f, 36.0f,
2107
2108 37.0f, 38.0f, 39.0f,
2109 40.0f, 41.0f, 42.0f,
2110 43.0f, 44.0f, 45.0f,
2111 46.0f, 47.0f, 48.0f,
2112 49.0f, 50.0f, 51.0f,
2113 52.0f, 53.0f, 54.0f,
2114 })
2115 );
2116
2117 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2118 {
2119 1.0f, 2.0f, 3.0f,
2120 4.0f, 5.0f, 6.0f,
2121 7.0f, 8.0f, 9.0f,
2122 10.0f, 11.0f, 12.0f,
2123 13.0f, 14.0f, 15.0f,
2124 16.0f, 17.0f, 18.0f,
2125
2126 19.0f, 20.0f, 21.0f,
2127 22.0f, 23.0f, 24.0f,
2128 25.0f, 26.0f, 27.0f,
2129 28.0f, 29.0f, 30.0f,
2130 31.0f, 32.0f, 33.0f,
2131 34.0f, 35.0f, 36.0f,
2132 })
2133 );
2134
2135 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2136 {
2137 37.0f, 38.0f, 39.0f,
2138 40.0f, 41.0f, 42.0f,
2139 43.0f, 44.0f, 45.0f,
2140 46.0f, 47.0f, 48.0f,
2141 49.0f, 50.0f, 51.0f,
2142 52.0f, 53.0f, 54.0f,
2143 })
2144 );
2145
2146 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002147 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002148
2149 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002150 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
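    // With the output laid out as {channels, height, width}, these origins place input1 at the start
    // of the output and input2 at channel offset 2, directly after input1's two channels, so the two
    // views together implement a concatenation along dimension 0.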
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002151
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002152 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002153
2154 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2155
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002156 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002157 subTensorsSupported ?
2158 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2159 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2160
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002161 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002162 subTensorsSupported ?
2163 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2164 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2165
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002166 ConcatQueueDescriptor data;
2167 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002168 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2169 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2170 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2171
2172 data.m_ViewOrigins.push_back(window1);
2173 data.m_ViewOrigins.push_back(window2);
2174
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002175 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002176
2177 inputHandle1->Allocate();
2178 inputHandle2->Allocate();
2179 outputHandle->Allocate();
2180
2181 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2182 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2183
2184 workload->PostAllocationConfigure();
2185 workload->Execute();
2186
2187 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2188
2189 return ret;
2190}
2191
2192LayerTestResult<float, 1> Concat1dTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002193 IWorkloadFactory& workloadFactory,
2194 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002195{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002196 return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002197}
2198
2199LayerTestResult<float, 2> Concat2dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002200 IWorkloadFactory& workloadFactory,
2201 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002202{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002203 return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002204}
2205
2206LayerTestResult<float, 2> Concat2dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002207 IWorkloadFactory& workloadFactory,
2208 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002209{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002210 return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002211}
2212
2213LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002214 IWorkloadFactory& workloadFactory,
2215 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002216{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002217 return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002218}
2219
2220LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002221 IWorkloadFactory& workloadFactory,
2222 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002223{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002224 return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002225}
2226
2227LayerTestResult<float, 3> Concat3dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002228 IWorkloadFactory& workloadFactory,
2229 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002230{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002231 return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002232}
2233
2234LayerTestResult<float, 3> Concat3dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002235 IWorkloadFactory& workloadFactory,
2236 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002237{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002238 return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002239}
2240
2241LayerTestResult<float, 3> Concat3dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002242 IWorkloadFactory& workloadFactory,
2243 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002244 bool useSubtensor)
2245{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002246 return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002247}
2248
2249LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002250 IWorkloadFactory& workloadFactory,
2251 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002252{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002253 return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002254 workloadFactory, memoryManager, 0.0f, 0);
2255}
2256
2257LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002258 IWorkloadFactory& workloadFactory,
2259 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002260{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002261 return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002262}
2263
2264LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002265 IWorkloadFactory& workloadFactory,
2266 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002267 bool useSubtensor)
2268{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002269 return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002270 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2271}
2272
2273LayerTestResult<float, 4> Concat4dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002274 IWorkloadFactory& workloadFactory,
2275 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002276{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002277 return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002278}
2279
2280LayerTestResult<float, 4> Concat4dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002281 IWorkloadFactory& workloadFactory,
2282 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002283{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002284 return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002285}
2286
2287LayerTestResult<float, 4> Concat4dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002288 IWorkloadFactory& workloadFactory,
2289 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002290{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002291 return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002292}
2293
2294LayerTestResult<float, 4> Concat4dDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002295 IWorkloadFactory& workloadFactory,
2296 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002297 bool useSubtensor)
2298{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002299 return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002300}
2301
2302LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002303 IWorkloadFactory& workloadFactory,
2304 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002305{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002306 return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002307}
2308
2309LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002310 IWorkloadFactory& workloadFactory,
2311 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002312{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002313 return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002314 workloadFactory, memoryManager, 0.0f, 0);
2315}
2316
2317LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002318 IWorkloadFactory& workloadFactory,
2319 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002320{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002321 return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002322}
2323
2324LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002325 IWorkloadFactory& workloadFactory,
2326 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002327 bool useSubtensor)
2328{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002329 return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002330 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2331}
2332
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002333LayerTestResult<Half, 3> ConcatFloat16Test(
2334 IWorkloadFactory& workloadFactory,
2335 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matthew Jackson9bff1442019-09-12 09:08:23 +01002336{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002337 return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
Matthew Jackson9bff1442019-09-12 09:08:23 +01002338}
2339
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002340LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002341 IWorkloadFactory& workloadFactory,
2342 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002343{
2344 unsigned int outputWidth = 3;
2345 unsigned int outputHeight = 6;
2346 unsigned int outputChannels = 3;
2347
2348 unsigned int inputWidth1 = 3;
2349 unsigned int inputHeight1 = 6;
2350 unsigned int inputChannels1 = 2;
2351
2352 unsigned int inputWidth2 = 3;
2353 unsigned int inputHeight2 = 6;
2354 unsigned int inputChannels2 = 1;
2355
2356 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002357 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
2358 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
2359 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002360
2361 // Quantized input1 tensor. Range [-3, 1]
2362 const float inputScale1 = 0.015686f;
2363 const int32_t inputOffset1 = 192;
2364
2365 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2366 {
2367 1, 2, 3,
2368 4, 5, 6,
2369 7, 8, 9,
2370 10, 11, 12,
2371 13, 14, 15,
2372 16, 17, 18,
2373
2374 19, 20, 21,
2375 22, 23, 24,
2376 25, 26, 27,
2377 28, 29, 30,
2378 31, 32, 33,
2379 34, 35, 36,
2380 })
2381 );
2382
 2383 // Quantized input2 tensor. Range [-1, 4]
2384 const float inputScale2 = 0.019608f;
2385 const int32_t inputOffset2 = 50;
2386
2387 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2388 {
2389 37, 38, 39,
2390 40, 41, 42,
2391 43, 44, 45,
2392 46, 47, 48,
2393 49, 50, 51,
2394 52, 53, 54,
2395 })
2396 );
2397
 2398 // Output has the same quantization parameters as input1,
 2399 // so that only the requantization of input2 is required.
2400 const float outputScale = 0.015686f;
2401 const int32_t outputOffset = 192;
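    // An input2 value q is therefore requantized as round(inputScale2 * (q - inputOffset2) / outputScale) + outputOffset;
    // for example, 37 -> round(0.019608f * (37 - 50) / 0.015686f) + 192 = 176, the first requantized value below.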
2402
2403 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2404
2405 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2406 {
2407 1, 2, 3,
2408 4, 5, 6,
2409 7, 8, 9,
2410 10, 11, 12,
2411 13, 14, 15,
2412 16, 17, 18,
2413
2414 19, 20, 21,
2415 22, 23, 24,
2416 25, 26, 27,
2417 28, 29, 30,
2418 31, 32, 33,
2419 34, 35, 36,
2420
2421 176, 177, 178,
2422 179, 181, 182,
2423 183, 184, 186,
2424 187, 188, 189,
2425 191, 192, 193,
2426 195, 196, 197,
2427 })
2428 );
2429
2430 outputTensorInfo.SetQuantizationScale(outputScale);
2431 outputTensorInfo.SetQuantizationOffset(outputOffset);
2432 inputTensorInfo1.SetQuantizationScale(inputScale1);
2433 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2434 inputTensorInfo2.SetQuantizationScale(inputScale2);
2435 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2436
2437 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002438 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002439
2440 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002441 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002442
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002443 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002444
2445 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2446
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002447 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002448 subTensorsSupported ?
2449 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2450 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2451
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002452 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002453 subTensorsSupported ?
2454 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2455 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2456
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002457 ConcatQueueDescriptor data;
2458 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002459 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2460 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2461 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2462
2463 data.m_ViewOrigins.push_back(window1);
2464 data.m_ViewOrigins.push_back(window2);
2465
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002466 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002467
2468 inputHandle1->Allocate();
2469 inputHandle2->Allocate();
2470 outputHandle->Allocate();
2471
2472 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2473 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2474
2475 workload->PostAllocationConfigure();
2476 workload->Execute();
2477
2478 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2479
2480 return ret;
2481}
2482
2483LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002484 IWorkloadFactory& workloadFactory,
2485 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002486{
2487 unsigned int outputWidth = 3;
2488 unsigned int outputHeight = 6;
2489 unsigned int outputChannels = 3;
2490
2491 unsigned int inputWidth1 = 3;
2492 unsigned int inputHeight1 = 6;
2493 unsigned int inputChannels1 = 2;
2494
2495 unsigned int inputWidth2 = 3;
2496 unsigned int inputHeight2 = 6;
2497 unsigned int inputChannels2 = 1;
2498
2499 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002500 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
2501 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
2502 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002503
2504 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2505 const float scale = 0.13497836f;
2506 const int32_t offset = -7;
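    // Because all tensors share the same quantization parameters, the raw uint8 values are copied through
    // unchanged and the expected output is simply the two inputs laid out back to back along the channel dimension.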
2507
2508 outputTensorInfo.SetQuantizationScale(scale);
2509 outputTensorInfo.SetQuantizationOffset(offset);
2510 inputTensorInfo1.SetQuantizationScale(scale);
2511 inputTensorInfo1.SetQuantizationOffset(offset);
2512 inputTensorInfo2.SetQuantizationScale(scale);
2513 inputTensorInfo2.SetQuantizationOffset(offset);
2514
2515 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2516
2517 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2518 {
2519 1, 2, 3,
2520 4, 5, 6,
2521 7, 8, 9,
2522 10, 11, 12,
2523 13, 14, 15,
2524 16, 17, 18,
2525
2526 19, 20, 21,
2527 22, 23, 24,
2528 25, 26, 27,
2529 28, 29, 30,
2530 31, 32, 33,
2531 34, 35, 36,
2532
2533 37, 38, 39,
2534 40, 41, 42,
2535 43, 44, 45,
2536 46, 47, 48,
2537 49, 50, 51,
2538 52, 53, 54,
2539 })
2540 );
2541
2542 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2543 {
2544 1, 2, 3,
2545 4, 5, 6,
2546 7, 8, 9,
2547 10, 11, 12,
2548 13, 14, 15,
2549 16, 17, 18,
2550
2551 19, 20, 21,
2552 22, 23, 24,
2553 25, 26, 27,
2554 28, 29, 30,
2555 31, 32, 33,
2556 34, 35, 36,
2557 })
2558 );
2559
2560 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2561 {
2562 37, 38, 39,
2563 40, 41, 42,
2564 43, 44, 45,
2565 46, 47, 48,
2566 49, 50, 51,
2567 52, 53, 54,
2568 })
2569 );
2570
2571 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002572 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002573
2574 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002575 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002576
2577
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002578 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002579
2580 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2581
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002582 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002583 subTensorsSupported ?
2584 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2585 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2586
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002587 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002588 subTensorsSupported ?
2589 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2590 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2591
2592
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002593 ConcatQueueDescriptor data;
2594 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002595 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2596 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2597 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2598
2599 data.m_ViewOrigins.push_back(window1);
2600 data.m_ViewOrigins.push_back(window2);
2601
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002602 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002603
2604 inputHandle1->Allocate();
2605 inputHandle2->Allocate();
2606 outputHandle->Allocate();
2607
2608 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2609 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2610
2611 workload->PostAllocationConfigure();
2612 workload->Execute();
2613
2614 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2615
2616 return ret;
2617}
2618
2619LayerTestResult<uint16_t, 3> ConcatUint16Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002620 IWorkloadFactory& workloadFactory,
2621 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002622{
2623 unsigned int outputWidth = 3;
2624 unsigned int outputHeight = 6;
2625 unsigned int outputChannels = 3;
2626
2627 unsigned int inputWidth1 = 3;
2628 unsigned int inputHeight1 = 6;
2629 unsigned int inputChannels1 = 2;
2630
2631 unsigned int inputWidth2 = 3;
2632 unsigned int inputHeight2 = 6;
2633 unsigned int inputChannels2 = 1;
2634
2635 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002636 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
2637 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
2638 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002639
2640 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2641 const float scale = 0.13497836f;
2642 const int32_t offset = -7;
2643
2644 outputTensorInfo.SetQuantizationScale(scale);
2645 outputTensorInfo.SetQuantizationOffset(offset);
2646 inputTensorInfo1.SetQuantizationScale(scale);
2647 inputTensorInfo1.SetQuantizationOffset(offset);
2648 inputTensorInfo2.SetQuantizationScale(scale);
2649 inputTensorInfo2.SetQuantizationOffset(offset);
2650
2651 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2652
2653 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2654 {
2655 1, 2, 3,
2656 4, 5, 6,
2657 7, 8, 9,
2658 10, 11, 12,
2659 13, 14, 15,
2660 16, 17, 18,
2661
2662 19, 20, 21,
2663 22, 23, 24,
2664 25, 26, 27,
2665 28, 29, 30,
2666 31, 32, 33,
2667 34, 35, 36,
2668
2669 37, 38, 39,
2670 40, 41, 42,
2671 43, 44, 45,
2672 46, 47, 48,
2673 49, 50, 51,
2674 52, 53, 54,
2675 }));
2676
2677 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2678 {
2679 1, 2, 3,
2680 4, 5, 6,
2681 7, 8, 9,
2682 10, 11, 12,
2683 13, 14, 15,
2684 16, 17, 18,
2685
2686 19, 20, 21,
2687 22, 23, 24,
2688 25, 26, 27,
2689 28, 29, 30,
2690 31, 32, 33,
2691 34, 35, 36,
2692 }));
2693
2694 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2695 {
2696 37, 38, 39,
2697 40, 41, 42,
2698 43, 44, 45,
2699 46, 47, 48,
2700 49, 50, 51,
2701 52, 53, 54,
2702 }));
2703
2704 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002705 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002706
2707 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002708 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002709
2710
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002711 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002712
2713 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2714
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002715 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002716 subTensorsSupported ?
2717 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2718 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2719
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002720 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002721 subTensorsSupported ?
2722 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2723 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2724
2725
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002726 ConcatQueueDescriptor data;
2727 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002728 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2729 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2730 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2731
2732 data.m_ViewOrigins.push_back(window1);
2733 data.m_ViewOrigins.push_back(window2);
2734
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002735 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002736
2737 inputHandle1->Allocate();
2738 inputHandle2->Allocate();
2739 outputHandle->Allocate();
2740
2741 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2742 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2743
2744 workload->PostAllocationConfigure();
2745 workload->Execute();
2746
2747 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2748
2749 return ret;
2750}
2751
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

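// When useSubtensor is true (and the backend reports sub-tensor support), the inputs are expected
// to be created as sub-tensor views of the output handle rather than as separate tensor handles,
// mirroring the subTensorsSupported ? CreateSubTensorHandle : CreateTensorHandle choice above.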
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}