//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConcatTestImpl.hpp"

#include <Permute.hpp>
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

using namespace armnn;
using namespace armnnUtils;

//
// Helper functions and templates
//

OriginsDescriptor CreateDescriptorForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    std::vector<TensorShape> shapes;
    shapes.reserve(inputTensorInfos.size());
    for (const TensorInfo& it: inputTensorInfos)
    {
        shapes.push_back(it.GetShape());
    }

    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
}

//
// Concat is only supported for the N and C dimensions of NCHW tensors and for the innermost dimension.
// For tensors with fewer than 4 dimensions we need to make sure that the concat dimension is at least
// the 3rd slowest iterating one or the innermost dimension.
//

bool NeedPermuteForConcat(
    const std::vector<TensorInfo> & inputTensorInfos,
    unsigned int concatDim)
{
    // See note above. Additionally we expect the input shapes to have the
    // same number of dimensions.
    unsigned int nDimensions = 0;

    // Determine the number of dimensions as well as sanity check them
    // against test implementation issues.
    for (auto && tensorInfo : inputTensorInfos)
    {
        if (!nDimensions)
        {
            nDimensions = tensorInfo.GetShape().GetNumDimensions();
        }
        else
        {
            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                             "Input shapes must have the same number of dimensions");
        }
    }

    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
}
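
// Example (illustrative): a 2D shape such as { 2, 3 } always takes the permute path because
// nDimensions < 3, whatever the concat dimension is. A 3D shape such as { 2, 3, 2 } needs it only
// when concatenating along dimension 1; concatenating along dimension 0 or along the innermost
// dimension 2 can be handled directly.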

TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
{
    unsigned int numDims = inputShape.GetNumDimensions();
    if (numDims >= 3)
    {
        // Nothing to do if the inputShape has at least 3 dimensions.
        return inputShape;
    }

    std::vector<unsigned int> newDims(size_t(3), 1u);
    unsigned int expandedBy = 3 - numDims;
    for (unsigned int i=0; i<numDims; ++i)
    {
        newDims[expandedBy+i] = inputShape[i];
    }
    return TensorShape(3u, &newDims[0]);
}
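
// Example (illustrative): { 5 } expands to { 1, 1, 5 } and { 2, 3 } expands to { 1, 2, 3 };
// shapes that already have 3 or more dimensions are returned unchanged.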

void Generate3dPermuteVectorForConcat(
    unsigned int numDimensions,
    unsigned int & concatDim,
    std::pair<PermutationVector, PermutationVector> & permutations)
{
    BOOST_ASSERT_MSG(numDimensions <= 3,
                     "Only dimensions 1,2 and 3 are supported by this helper");
    unsigned int expandedBy = 3 - numDimensions;
    unsigned int expandedConcatAxis = concatDim + expandedBy;

    if (expandedConcatAxis == 2)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({1, 2, 0});
        PermutationVector reversePermutation({2, 0, 1});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else if (expandedConcatAxis == 1)
    {
        concatDim = 0;
        PermutationVector forwardPermutation({2, 0, 1});
        PermutationVector reversePermutation({1, 2, 0});
        permutations = std::make_pair(forwardPermutation, reversePermutation);
    }
    else
    {
        BOOST_ASSERT(expandedConcatAxis == 0);
        concatDim = 0;
    }
}
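
// Example (illustrative): for numDimensions = 2 and concatDim = 1 the expanded concat axis is 2,
// so the forward mapping { 1, 2, 0 } moves that axis to position 0 (where concat is supported)
// and the reverse mapping { 2, 0, 1 } undoes it afterwards; concatDim is rewritten to 0.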

template<typename T> void PermuteTensorData(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const PermutationVector& mappings,
    TensorInfo & inputTensorInfo,
    const T * inputData,
    std::vector<T>& outputData)
{
    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
    if (inputData == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);

    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    PermuteQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
    WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData);

    workload->PostAllocationConfigure();
    workload->Execute();

    outputData.resize(outputTensorInfo.GetNumElements());
    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
    inputTensorInfo = outputTensorInfo;
}
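
// Note: PermuteTensorData runs a standalone Permute workload on the given backend and, on return,
// overwrites inputTensorInfo with the permuted TensorInfo so the caller sees the new shape
// alongside the permuted data in outputData.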

//
// Permutes the input tensors so we can do a supported concatenation.
// Tensors with fewer than 3 dimensions are treated as 3D by adding dummy
// dimensions of size 1 at the front. Finally this function computes the
// output shape of the permuted, concatenated tensor.
//
template<typename T> void PermuteInputsForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::vector<TensorInfo> & inputTensorInfos,
    std::vector<T *> & inputData,
    std::vector<std::vector<T>> & inputDataStorage,
    PermutationVector & permuteVector,
    unsigned int & concatDim,
    TensorInfo & outputTensorInfo)
{
    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
                     "Expecting more than one tensor to be concatenated here");

    unsigned int numDims = 0;
    unsigned int nthInput = 0;
    const PermutationVector identity({0, 1, 2});

    std::pair<PermutationVector, PermutationVector> permutations =
        std::make_pair(identity, identity);

    inputDataStorage.resize(inputData.size());

    for (auto && tensorInfo : inputTensorInfos)
    {
        if (numDims == 0)
        {
            numDims = tensorInfo.GetShape().GetNumDimensions();
            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);

            // Store the reverse permutation.
            permuteVector = permutations.second;
            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
                             "Test logic error, we don't need permutation, so we shouldn't arrive here");
        }
        else
        {
            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                             "All inputs must have the same number of dimensions");
        }

        TensorInfo newTensorInfo = tensorInfo;
        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));

        PermuteTensorData<T>(workloadFactory,
                             memoryManager,
                             permutations.first,
                             newTensorInfo,
                             inputData[nthInput],
                             inputDataStorage[nthInput]);

        inputData[nthInput] = inputDataStorage[nthInput].data();
        inputTensorInfos[nthInput] = newTensorInfo;

        ++nthInput;
    }

    outputTensorInfo.SetShape(
        armnnUtils::Permuted(
            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
            permutations.first));
}
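
// Worked example (illustrative): concatenating two { 2, 3 } inputs along dimension 1 expands each
// input to { 1, 2, 3 } and permutes it with the forward mapping to { 3, 1, 2 }, so the concat
// workload can run along dimension 0; outputTensorInfo is likewise rewritten from { 2, 6 } to
// { 6, 1, 2 }, and permuteVector holds the reverse mapping used later to undo the permutation.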

//
// This is the counterpart of PermuteInputsForConcat(...): it permutes the
// output of the concatenation back so we can check it against an expected
// output.
//
template <typename T> void PermuteOutputForConcat(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo & tensorInfo,
    const PermutationVector & permuteVector,
    std::unique_ptr<ITensorHandle> && inputDataHandle,
    T * data)
{
    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
    if (data == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the permutation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    TensorInfo resultTensorInfo = tensorInfo;
    std::vector<T> inputData(tensorInfo.GetNumElements());
    std::vector<T> outputData;

    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());

    PermuteTensorData<T>(workloadFactory,
                         memoryManager,
                         permuteVector,
                         resultTensorInfo,
                         &inputData[0],
                         outputData);

    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
}

template<typename T> void Concatenate(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
    std::initializer_list<T *> inputsOrig,
    const TensorInfo& outputTensorInfoOrig,
    T * output,
    unsigned int concatDim,
    bool useSubtensor)
{
    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
    if (output == nullptr)
    {
        // Nullptr is an error in the test. By returning without doing the concatenation
        // I expect the caller to fail the test. It still makes sense to report this as
        // an assert for Debug builds.
        return;
    }

    // Save copies of the parameters which we might need to change.
    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
    std::vector<T *> inputs = inputsOrig;
    TensorInfo outputTensorInfo = outputTensorInfoOrig;

    PermutationVector permuteVector{0, 1, 2};

    // Holds and automatically releases memory for the reshaped input data.
    std::vector<std::vector<T>> tmpInputDataStorage;

    const size_t inputCount = inputTensorInfos.size();

    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);

    if (needPermuteForConcat)
    {
        //
        // We need to permute the inputs, because concatenation along
        // the requested axis is not supported.
        //
        PermuteInputsForConcat<T>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfos,
                                  inputs,
                                  tmpInputDataStorage,
                                  permuteVector,
                                  concatDim,
                                  outputTensorInfo);
    }

    WorkloadInfo workloadInfo;

    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
    inputHandles.reserve(inputCount);

    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    ConcatQueueDescriptor queueDescriptor;
    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
    queueDescriptor.m_Parameters = viewsDescriptor;

    if (useSubtensor)
    {
        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
        {
            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
        }

        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
            std::unique_ptr<ITensorHandle> inputHandle =
                subTensorsSupported ?
                    workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                          inputTensorInfo.GetShape(),
                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
                    workloadFactory.CreateTensorHandle(inputTensorInfo);

            inputHandles.emplace_back(std::move(inputHandle));
        }

    }
    else
    {
        for (unsigned int i = 0; i < inputCount; ++i)
        {
            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
            inputHandles.emplace_back(std::move(inputHandle));
        }
    }

    for (unsigned int i = 0; i < inputCount; ++i)
    {
        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);

    for (auto& inputHandle : inputHandles)
    {
        inputHandle->Allocate();
    }

    outputHandle->Allocate();

    unsigned int nextInputId = 0;
    for (auto& inputHandle : inputHandles)
    {
        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
        ++nextInputId;
    }

    workload->PostAllocationConfigure();
    workload->Execute();

    if (needPermuteForConcat)
    {
        PermuteOutputForConcat<T>(workloadFactory,
                                  memoryManager,
                                  outputTensorInfo,
                                  permuteVector,
                                  std::move(outputHandle),
                                  output);
    }
    else
    {
        CopyDataFromITensorHandle(output, outputHandle.get());
    }
}
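
// Usage sketch (illustrative; info0/info1 and data0/data1 are placeholder names): the test
// implementations below call Concatenate<T>() roughly like this:
//
//     std::vector<float> out(outputTensorInfo.GetNumElements());
//     Concatenate<float>(workloadFactory, memoryManager,
//                        { info0, info1 },            // one TensorInfo per input
//                        { data0, data1 },            // matching raw data pointers
//                        outputTensorInfo, out.data(),
//                        0,                           // concat dimension
//                        true);                       // allow sub-tensor views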

//
// Implementation templates
//

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concat1dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));

    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 1> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    const float qScale,
    const int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            7.0f, 8.0f, 9.0f,

            // Batch 1
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f,

            // Batch 2
            7.0f, 8.0f, 9.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,

            // Batch 2
            4.0f, 5.0f, 6.0f,

            // Batch 3
            13.0f, 14.0f, 15.0f,

            // Batch 4
            7.0f, 8.0f, 9.0f,

            // Batch 5
            16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,

            // Batch 1
            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            9.0f,

            // Batch 1
            18.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,

            // Batch 1
            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 1, Channel 0
            25.0f, 26.0f,

            // Batch 1, Channel 1
            27.0f, 28.0f,

            // Batch 1, Channel 2
            29.0f, 30.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            13.0f, 14.0f,

            // Batch 0, Channel 7
            15.0f, 16.0f,

            // Batch 0, Channel 8
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 4
            27.0f, 28.0f,

            // Batch 1, Channel 5
            29.0f, 30.0f,

            // Batch 1, Channel 6
            31.0f, 32.0f,

            // Batch 1, Channel 7
            33.0f, 34.0f,

            // Batch 1, Channel 8
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2TestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);

    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            25.0f, 26.0f,

            // Batch 0, Channel 1
            27.0f, 28.0f,

            // Batch 0, Channel 2
            29.0f, 30.0f,

            // Batch 1, Channel 0
            13.0f, 14.0f,

            // Batch 1, Channel 1
            15.0f, 16.0f,

            // Batch 1, Channel 2
            17.0f, 18.0f,

            // Batch 2, Channel 0
            31.0f, 32.0f,

            // Batch 2, Channel 1
            33.0f, 34.0f,

            // Batch 2, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   0,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 2, Channel 0
            7.0f, 8.0f,

            // Batch 2, Channel 1
            9.0f, 10.0f,

            // Batch 2, Channel 2
            11.0f, 12.0f,

            // Batch 3, Channel 0
            25.0f, 26.0f,

            // Batch 3, Channel 1
            27.0f, 28.0f,

            // Batch 3, Channel 2
            29.0f, 30.0f,

            // Batch 4, Channel 0
            13.0f, 14.0f,

            // Batch 4, Channel 1
            15.0f, 16.0f,

            // Batch 4, Channel 2
            17.0f, 18.0f,

            // Batch 5, Channel 0
            31.0f, 32.0f,

            // Batch 5, Channel 1
            33.0f, 34.0f,

            // Batch 5, Channel 2
            35.0f, 36.0f
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f, 8.0f,

            // Batch 0, Channel 1
            9.0f, 10.0f,

            // Batch 0, Channel 2
            11.0f, 12.0f,

            // Batch 0, Channel 3
            25.0f, 26.0f,

            // Batch 1, Channel 0
            27.0f, 28.0f,

            // Batch 1, Channel 1
            29.0f, 30.0f,

            // Batch 1, Channel 2
            13.0f, 14.0f,

            // Batch 1, Channel 3
            15.0f, 16.0f,
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            17.0f, 18.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   1,
                   true);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 0, Channel 3
            7.0f, 8.0f,

            // Batch 0, Channel 4
            9.0f, 10.0f,

            // Batch 0, Channel 5
            11.0f, 12.0f,

            // Batch 0, Channel 6
            25.0f, 26.0f,

            // Batch 0, Channel 7
            17.0f, 18.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f,

            // Batch 1, Channel 3
            27.0f, 28.0f,

            // Batch 1, Channel 4
            29.0f, 30.0f,

            // Batch 1, Channel 5
            13.0f, 14.0f,

            // Batch 1, Channel 6
            15.0f, 16.0f,

            // Batch 1, Channel 7
            31.0f, 32.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f
        },
        qScale, qOffset));

    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            7.0f,

            // Batch 0, Channel 1
            9.0f,

            // Batch 0, Channel 2
            11.0f,

            // Batch 1, Channel 0
            25.0f,

            // Batch 1, Channel 1
            27.0f,

            // Batch 1, Channel 2
            29.0f
        },
        qScale, qOffset));

    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
    LayerTestResult<T, 3> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());
    Concatenate<T>(workloadFactory, memoryManager,
                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
                   { input0.data(), input1.data(), input2.data() },
                   outputTensorInfo,
                   output.data(),
                   2,
                   useSubtensor);

    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,

            // Batch 0, Channel 1
            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,

            // Batch 0, Channel 2
            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,

            // Batch 1, Channel 0
            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,

            // Batch 1, Channel 1
            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,

            // Batch 1, Channel 2
            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
        },
        qScale, qOffset));

    return result;
}

template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concat4dTestImpl(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const TensorInfo& outputTensorInfo,
    unsigned int dimension,
    bool useSubtensor,
    float qScale,
    int32_t qOffset)
{
    TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);

    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            1.0f, 2.0f,
            3.0f, 4.0f,
            5.0f, 6.0f,
            7.0f, 8.0f,
            9.0f, 10.0f,
            11.0f, 12.0f
        },
        qScale, qOffset));

    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            11.0f, 12.0f,
            13.0f, 14.0f,
            15.0f, 16.0f,
            17.0f, 18.0f,
            19.0f, 20.0f,
            21.0f, 22.0f
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
        {
            21.0f, 22.0f,
            23.0f, 24.0f,
            25.0f, 26.0f,
            27.0f, 28.0f,
            29.0f, 30.0f,
            31.0f, 32.0f
        },
        qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::vector<T> output;
    output.resize(outputTensorInfo.GetNumElements());

    Concatenate<T>(workloadFactory,
                   memoryManager,
                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
                   {input0.data(), input1.data(), input2.data()},
                   outputTensorInfo,
                   output.data(),
                   dimension,
                   useSubtensor);

    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
    return result;
}
1458
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001459template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001460LayerTestResult<T, 4> Concat4dDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001461 IWorkloadFactory& workloadFactory,
1462 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001463 float qScale,
1464 int32_t qOffset)
1465{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001466 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001467
1468 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1469 workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
1470
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001471 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1472 {
1473 1.0f, 2.0f,
1474 3.0f, 4.0f,
1475 5.0f, 6.0f,
1476 7.0f, 8.0f,
1477 9.0f, 10.0f,
1478 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001479
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001480 11.0f, 12.0f,
1481 13.0f, 14.0f,
1482 15.0f, 16.0f,
1483 17.0f, 18.0f,
1484 19.0f, 20.0f,
1485 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001486
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001487 21.0f, 22.0f,
1488 23.0f, 24.0f,
1489 25.0f, 26.0f,
1490 27.0f, 28.0f,
1491 29.0f, 30.0f,
1492 31.0f, 32.0f
1493 },
1494 qScale, qOffset));
1495
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001496 return result;
1497}
1498
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001499template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001500LayerTestResult<T, 4> Concat4dDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001501 IWorkloadFactory& workloadFactory,
1502 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001503 float qScale,
1504 int32_t qOffset)
1505{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001506 TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001507
1508 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1509 workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
1510
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001511 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1512 {
1513 1.0f, 2.0f,
1514 3.0f, 4.0f,
1515 5.0f, 6.0f,
1516 7.0f, 8.0f,
1517 9.0f, 10.0f,
1518 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001519
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001520 11.0f, 12.0f,
1521 13.0f, 14.0f,
1522 15.0f, 16.0f,
1523 17.0f, 18.0f,
1524 19.0f, 20.0f,
1525 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001526
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001527 21.0f, 22.0f,
1528 23.0f, 24.0f,
1529 25.0f, 26.0f,
1530 27.0f, 28.0f,
1531 29.0f, 30.0f,
1532 31.0f, 32.0f
1533 },
1534 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001535
1536 return result;
1537}
1538
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001539template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001540LayerTestResult<T, 4> Concat4dDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001541 IWorkloadFactory& workloadFactory,
1542 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001543 float qScale,
1544 int32_t qOffset)
1545{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001546 TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001547
1548 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1549 workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
1550
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001551 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1552 {
1553 1.0f, 2.0f,
1554 3.0f, 4.0f,
1555 11.0f, 12.0f,
1556 13.0f, 14.0f,
1557 21.0f, 22.0f,
1558 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001559
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001560 5.0f, 6.0f,
1561 7.0f, 8.0f,
1562 15.0f, 16.0f,
1563 17.0f, 18.0f,
1564 25.0f, 26.0f,
1565 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001566
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001567 9.0f, 10.0f,
1568 11.0f, 12.0f,
1569 19.0f, 20.0f,
1570 21.0f, 22.0f,
1571 29.0f, 30.0f,
1572 31.0f, 32.0f
1573 },
1574 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001575
1576 return result;
1577}
1578
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001579template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001580LayerTestResult<T, 4> Concat4dDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001581 IWorkloadFactory& workloadFactory,
1582 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001583 float qScale,
1584 int32_t qOffset,
1585 bool useSubtensor)
1586{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001587 TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001588
1589 LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
1590 workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
1591
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001592 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1593 {
1594 1.0f, 2.0f,
1595 11.0f, 12.0f,
1596 21.0f, 22.0f,
1597 3.0f, 4.0f,
1598 13.0f, 14.0f,
1599 23.0f, 24.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001600
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001601 5.0f, 6.0f,
1602 15.0f, 16.0f,
1603 25.0f, 26.0f,
1604 7.0f, 8.0f,
1605 17.0f, 18.0f,
1606 27.0f, 28.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001607
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001608 9.0f, 10.0f,
1609 19.0f, 20.0f,
1610 29.0f, 30.0f,
1611 11.0f, 12.0f,
1612 21.0f, 22.0f,
1613 31.0f, 32.0f
1614 },
1615 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001616
1617 return result;
1618}
1619
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001620template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001621LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001622 IWorkloadFactory& workloadFactory,
1623 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001624 float qScale,
1625 int32_t qOffset)
1626{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001627 constexpr unsigned int dimension = 0u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001628
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001629 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1630 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1631 {
1632 1.0f, 2.0f,
1633 3.0f, 4.0f,
1634 5.0f, 6.0f,
1635 7.0f, 8.0f,
1636 9.0f, 10.0f,
1637 11.0f, 12.0f
1638 },
1639 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001640
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001641 TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001642
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001643 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1644 {
1645 11.0f, 12.0f,
1646 13.0f, 14.0f,
1647 15.0f, 16.0f,
1648 17.0f, 18.0f,
1649 19.0f, 20.0f,
1650 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001651
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001652 21.0f, 22.0f,
1653 23.0f, 24.0f,
1654 25.0f, 26.0f,
1655 27.0f, 28.0f,
1656 29.0f, 30.0f,
1657 31.0f, 32.0f
1658 },
1659 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001660
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001661 TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001662
1663 LayerTestResult<T, 4> result(outputTensorInfo);
1664
1665 std::vector<T> output;
1666 output.resize(outputTensorInfo.GetNumElements());
1667 Concatenate<T>(workloadFactory,
1668 memoryManager,
1669 {inputTensorInfo0, inputTensorInfo1},
1670 {input0.data(), input1.data()},
1671 outputTensorInfo,
1672 output.data(),
1673 dimension,
1674 true);
1675
1676 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001677 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1678 {
1679 1.0f, 2.0f,
1680 3.0f, 4.0f,
1681 5.0f, 6.0f,
1682 7.0f, 8.0f,
1683 9.0f, 10.0f,
1684 11.0f, 12.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001685
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001686 11.0f, 12.0f,
1687 13.0f, 14.0f,
1688 15.0f, 16.0f,
1689 17.0f, 18.0f,
1690 19.0f, 20.0f,
1691 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001692
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001693 21.0f, 22.0f,
1694 23.0f, 24.0f,
1695 25.0f, 26.0f,
1696 27.0f, 28.0f,
1697 29.0f, 30.0f,
1698 31.0f, 32.0f
1699 },
1700 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001701
1702 return result;
1703}
1704
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001705template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001706LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001707 IWorkloadFactory& workloadFactory,
1708 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001709 float qScale,
1710 int32_t qOffset)
1711{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001712 constexpr unsigned int dimension = 1u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001713
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001714 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1715 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1716 {
1717 1.0f, 2.0f,
1718 3.0f, 4.0f,
1719 5.0f, 6.0f,
1720 7.0f, 8.0f,
1721 9.0f, 10.0f,
1722 11.0f, 12.0f
1723 },
1724 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001725
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001726 TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001727
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001728 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1729 {
1730 11.0f, 12.0f,
1731 13.0f, 14.0f,
1732 15.0f, 16.0f,
1733 17.0f, 18.0f,
1734 },
1735 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001736
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001737 TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001738
1739 LayerTestResult<T, 4> result(outputTensorInfo);
1740
1741 std::vector<T> output;
1742 output.resize(outputTensorInfo.GetNumElements());
1743 Concatenate<T>(workloadFactory,
1744 memoryManager,
1745 {inputTensorInfo0, inputTensorInfo1},
1746 {input0.data(), input1.data()},
1747 outputTensorInfo,
1748 output.data(),
1749 dimension,
1750 true);
1751
1752 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001753 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1754 {
1755 1.0f, 2.0f,
1756 3.0f, 4.0f,
1757 5.0f, 6.0f,
1758 7.0f, 8.0f,
1759 9.0f, 10.0f,
1760 11.0f, 12.0f,
1761 11.0f, 12.0f,
1762 13.0f, 14.0f,
1763 15.0f, 16.0f,
1764 17.0f, 18.0f
1765 },
1766 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001767
1768 return result;
1769}
1770
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001771template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001772LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001773 IWorkloadFactory& workloadFactory,
1774 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001775 float qScale,
1776 int32_t qOffset)
1777{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001778 constexpr unsigned int dimension = 2u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001779
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001780 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1781 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1782 {
1783 1.0f, 2.0f,
1784 3.0f, 4.0f,
1785 5.0f, 6.0f,
1786 7.0f, 8.0f,
1787 9.0f, 10.0f,
1788 11.0f, 12.0f
1789 },
1790 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001791
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001792 TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
1793 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1794 {
1795 11.0f, 12.0f,
1796 13.0f, 14.0f,
1797 15.0f, 16.0f,
1798 17.0f, 18.0f,
1799 19.0f, 20.0f,
1800 21.0f, 22.0f,
1801 23.0f, 24.0f,
1802 25.0f, 26.0f,
1803 27.0f, 28.0f
1804 },
1805 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001806
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001807 TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001808 LayerTestResult<T, 4> result(outputTensorInfo);
1809
1810 std::vector<T> output;
1811 output.resize(outputTensorInfo.GetNumElements());
1812 Concatenate<T>(workloadFactory,
1813 memoryManager,
1814 {inputTensorInfo0, inputTensorInfo1},
1815 {input0.data(), input1.data()},
1816 outputTensorInfo,
1817 output.data(),
1818 dimension,
1819 true);
1820
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001821 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
1822 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1823 {
1824 1.0f, 2.0f,
1825 3.0f, 4.0f,
1826 11.0f, 12.0f,
1827 13.0f, 14.0f,
1828 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001829
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001830 5.0f, 6.0f,
1831 7.0f, 8.0f,
1832 17.0f, 18.0f,
1833 19.0f, 20.0f,
1834 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001835
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001836 9.0f, 10.0f,
1837 11.0f, 12.0f,
1838 23.0f, 24.0f,
1839 25.0f, 26.0f,
1840 27.0f, 28.0f
1841 },
1842 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001843
1844 return result;
1845}
1846
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001847template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001848LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001849 IWorkloadFactory& workloadFactory,
1850 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001851 float qScale,
1852 int32_t qOffset,
1853 bool useSubtensor)
1854{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001855 constexpr unsigned int dimension = 3u;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001856
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001857 TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
1858 auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
1859 {
1860 1.0f, 2.0f,
1861 3.0f, 4.0f,
1862 5.0f, 6.0f,
1863 7.0f, 8.0f,
1864 9.0f, 10.0f,
1865 11.0f, 12.0f
1866 },
1867 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001868
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001869 TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
1870 auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
1871 {
1872 11.0f, 12.0f, 13.0f,
1873 14.0f, 15.0f, 16.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001874
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001875 17.0f, 18.0f, 19.0f,
1876 20.0f, 21.0f, 22.0f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001877
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001878 23.0f, 24.0f, 25.0f,
1879 26.0f, 27.0f, 28.0f
1880 },
1881 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001882
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001883 TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001884
1885 LayerTestResult<T, 4> result(outputTensorInfo);
1886
1887 std::vector<T> output;
1888 output.resize(outputTensorInfo.GetNumElements());
1889 Concatenate<T>(workloadFactory,
1890 memoryManager,
1891 {inputTensorInfo0, inputTensorInfo1},
1892 {input0.data(), input1.data()},
1893 outputTensorInfo,
1894 output.data(),
1895 dimension,
1896 useSubtensor);
1897
1898 result.output = MakeTensor<T, 4>(outputTensorInfo, output);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001899 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
1900 {
1901 1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
1902 3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
1903 5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
1904 7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
1905 9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
1906 11.0f, 12.0f, 26.0f, 27.0f, 28.0f
1907 },
1908 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001909
1910 return result;
1911}
1912
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001913template<DataType ArmnnType, typename T>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001914LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001915 IWorkloadFactory& workloadFactory,
1916 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001917 bool useSubtensor)
1918{
1919 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001920 TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
1921 TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
1922 TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
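    // The { 3, 6, 2 } and { 3, 6, 1 } inputs are joined along dimension 2 (the innermost
    // dimension), giving the { 3, 6, 3 } output above.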
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001923
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001924 std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001925
1926 // Quantized input1 tensor.
1927 const float inputScale1 = 0.5f;
1928 const int32_t inputOffset1 = 5;
1929
1930 auto input1 = MakeTensor<T, 3>(inputTensorInfo1, std::vector<T>(
1931 {
1932 1, 2, 3,
1933 4, 5, 6,
1934 7, 8, 9,
1935 10, 11, 12,
1936 13, 14, 15,
1937 16, 17, 18,
1938
1939 19, 20, 21,
1940 22, 23, 24,
1941 25, 26, 27,
1942 28, 29, 30,
1943 31, 32, 33,
1944 34, 35, 36
1945 }));
1946
 1947 // Quantized input2 tensor.
1948 const float inputScale2 = 0.2f;
1949 const int32_t inputOffset2 = 10;
1950
1951 auto input2 = MakeTensor<T, 3>(inputTensorInfo2, std::vector<T>(
1952 {
1953 37, 38, 39,
1954 40, 41, 42,
1955 43, 44, 45,
1956 46, 47, 48,
1957 49, 50, 51,
1958 52, 53, 54
1959 }));
1960
1961 // Quantized output tensor.
1962 const float outputScale = 0.1f;
1963 const int32_t outputOffset = 20;
1964
1965 LayerTestResult<T, 3> ret(outputTensorInfo);
1966
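    // The expected values are the inputs dequantized with their own parameters and then
    // requantized with the output parameters, e.g. input1's 1 -> 0.5f * (1 - 5) / 0.1f + 20 = 0
    // and input2's 37 -> 0.2f * (37 - 10) / 0.1f + 20 = 74.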
1967 ret.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(
1968 {
1969 0, 5, 74,
1970 10, 15, 76,
1971 20, 25, 78,
1972 30, 35, 80,
1973 40, 45, 82,
1974 50, 55, 84,
1975
1976 60, 65, 86,
1977 70, 75, 88,
1978 80, 85, 90,
1979 90, 95, 92,
1980 100, 105, 94,
1981 110, 115, 96,
1982
1983 120, 125, 98,
1984 130, 135, 100,
1985 140, 145, 102,
1986 150, 155, 104,
1987 160, 165, 106,
1988 170, 175, 108
1989 }));
1990
1991 outputTensorInfo.SetQuantizationScale(outputScale);
1992 outputTensorInfo.SetQuantizationOffset(outputOffset);
1993 inputTensorInfo1.SetQuantizationScale(inputScale1);
1994 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
1995 inputTensorInfo2.SetQuantizationScale(inputScale2);
1996 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
1997
1998 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001999 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002000
2001 std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002002 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002003
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002004 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002005
2006 bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
2007
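    // With sub-tensor support the input handles below are views into the output tensor at the
    // window origins above, so each input is written straight into its slice of the output;
    // otherwise standalone input tensors are allocated and the concat workload gathers them
    // into the output.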
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002008 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002009 subTensorsSupported ?
2010 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2011 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2012
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002013 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002014 subTensorsSupported ?
2015 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2016 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2017
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002018 ConcatQueueDescriptor data;
2019 OriginsDescriptor desc = CreateDescriptorForConcatenation(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002020 inputTensorShapes.begin(),inputTensorShapes.end(), 2);
2021 data.m_Parameters = desc;
2022
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002023 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002024 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2025 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2026 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2027
2028 data.m_ViewOrigins.push_back(window1);
2029 data.m_ViewOrigins.push_back(window2);
2030
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002031 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002032
2033 inputHandle1->Allocate();
2034 inputHandle2->Allocate();
2035 outputHandle->Allocate();
2036
2037 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2038 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2039
2040 workload->PostAllocationConfigure();
2041 workload->Execute();
2042
2043 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2044
2045 return ret;
2046}
2047
2048//
2049// Explicit template specializations
2050//
2051
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002052template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
2053ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
2054 IWorkloadFactory& workloadFactory,
2055 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002056 bool useSubtensor);
2057
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002058template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
2059ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
2060 IWorkloadFactory& workloadFactory,
2061 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002062 bool useSubtensor);
2063
2064//
2065// Implementation functions
2066//
2067
2068LayerTestResult<float,3> ConcatTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002069 IWorkloadFactory& workloadFactory,
2070 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002071{
2072 unsigned int outputWidth = 3;
2073 unsigned int outputHeight = 6;
2074 unsigned int outputChannels = 3;
2075
2076 unsigned int inputWidth1 = 3;
2077 unsigned int inputHeight1 = 6;
2078 unsigned int inputChannels1 = 2;
2079
2080 unsigned int inputWidth2 = 3;
2081 unsigned int inputHeight2 = 6;
2082 unsigned int inputChannels2 = 1;
2083
2084 // Define the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002085 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
2086 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
2087 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002088
2089 LayerTestResult<float,3> ret(outputTensorInfo);
2090
2091 ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
2092 {
2093 1.0f, 2.0f, 3.0f,
2094 4.0f, 5.0f, 6.0f,
2095 7.0f, 8.0f, 9.0f,
2096 10.0f, 11.0f, 12.0f,
2097 13.0f, 14.0f, 15.0f,
2098 16.0f, 17.0f, 18.0f,
2099
2100 19.0f, 20.0f, 21.0f,
2101 22.0f, 23.0f, 24.0f,
2102 25.0f, 26.0f, 27.0f,
2103 28.0f, 29.0f, 30.0f,
2104 31.0f, 32.0f, 33.0f,
2105 34.0f, 35.0f, 36.0f,
2106
2107 37.0f, 38.0f, 39.0f,
2108 40.0f, 41.0f, 42.0f,
2109 43.0f, 44.0f, 45.0f,
2110 46.0f, 47.0f, 48.0f,
2111 49.0f, 50.0f, 51.0f,
2112 52.0f, 53.0f, 54.0f,
2113 })
2114 );
2115
2116 auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
2117 {
2118 1.0f, 2.0f, 3.0f,
2119 4.0f, 5.0f, 6.0f,
2120 7.0f, 8.0f, 9.0f,
2121 10.0f, 11.0f, 12.0f,
2122 13.0f, 14.0f, 15.0f,
2123 16.0f, 17.0f, 18.0f,
2124
2125 19.0f, 20.0f, 21.0f,
2126 22.0f, 23.0f, 24.0f,
2127 25.0f, 26.0f, 27.0f,
2128 28.0f, 29.0f, 30.0f,
2129 31.0f, 32.0f, 33.0f,
2130 34.0f, 35.0f, 36.0f,
2131 })
2132 );
2133
2134 auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
2135 {
2136 37.0f, 38.0f, 39.0f,
2137 40.0f, 41.0f, 42.0f,
2138 43.0f, 44.0f, 45.0f,
2139 46.0f, 47.0f, 48.0f,
2140 49.0f, 50.0f, 51.0f,
2141 52.0f, 53.0f, 54.0f,
2142 })
2143 );
2144
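    // Each view origin is the offset within the output at which the corresponding input is
    // placed: input1 occupies channels 0-1 and input2 occupies channel 2.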
2145 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002146 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002147
2148 std::vector<unsigned int> wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002149 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002150
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002151 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002152
2153 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2154
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002155 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002156 subTensorsSupported ?
2157 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2158 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2159
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002160 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002161 subTensorsSupported ?
2162 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2163 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2164
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002165 ConcatQueueDescriptor data;
2166 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002167 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2168 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2169 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2170
2171 data.m_ViewOrigins.push_back(window1);
2172 data.m_ViewOrigins.push_back(window2);
2173
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002174 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002175
2176 inputHandle1->Allocate();
2177 inputHandle2->Allocate();
2178 outputHandle->Allocate();
2179
2180 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2181 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2182
2183 workload->PostAllocationConfigure();
2184 workload->Execute();
2185
2186 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2187
2188 return ret;
2189}
2190
2191LayerTestResult<float, 1> Concat1dTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002192 IWorkloadFactory& workloadFactory,
2193 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002194{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002195 return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002196}
2197
2198LayerTestResult<float, 2> Concat2dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002199 IWorkloadFactory& workloadFactory,
2200 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002201{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002202 return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002203}
2204
2205LayerTestResult<float, 2> Concat2dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002206 IWorkloadFactory& workloadFactory,
2207 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002208{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002209 return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002210}
2211
2212LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002213 IWorkloadFactory& workloadFactory,
2214 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002215{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002216 return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002217}
2218
2219LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002220 IWorkloadFactory& workloadFactory,
2221 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002222{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002223 return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002224}
2225
2226LayerTestResult<float, 3> Concat3dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002227 IWorkloadFactory& workloadFactory,
2228 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002229{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002230 return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002231}
2232
2233LayerTestResult<float, 3> Concat3dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002234 IWorkloadFactory& workloadFactory,
2235 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002236{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002237 return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002238}
2239
2240LayerTestResult<float, 3> Concat3dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002241 IWorkloadFactory& workloadFactory,
2242 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002243 bool useSubtensor)
2244{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002245 return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002246}
2247
2248LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002249 IWorkloadFactory& workloadFactory,
2250 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002251{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002252 return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002253 workloadFactory, memoryManager, 0.0f, 0);
2254}
2255
2256LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002257 IWorkloadFactory& workloadFactory,
2258 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002259{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002260 return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002261}
2262
2263LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002264 IWorkloadFactory& workloadFactory,
2265 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002266 bool useSubtensor)
2267{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002268 return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002269 workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
2270}
2271
2272LayerTestResult<float, 4> Concat4dDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002273 IWorkloadFactory& workloadFactory,
2274 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002275{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002276 return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002277}
2278
2279LayerTestResult<float, 4> Concat4dDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002280 IWorkloadFactory& workloadFactory,
2281 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002282{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002283 return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002284}
2285
2286LayerTestResult<float, 4> Concat4dDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002287 IWorkloadFactory& workloadFactory,
2288 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002289{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002290 return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002291}
2292
2293LayerTestResult<float, 4> Concat4dDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002294 IWorkloadFactory& workloadFactory,
2295 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002296 bool useSubtensor)
2297{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002298 return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002299}
2300
2301LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002302 IWorkloadFactory& workloadFactory,
2303 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002304{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002305 return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002306}
2307
2308LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002309 IWorkloadFactory& workloadFactory,
2310 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002311{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002312 return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002313 workloadFactory, memoryManager, 0.0f, 0);
2314}
2315
2316LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002317 IWorkloadFactory& workloadFactory,
2318 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002319{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002320 return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002321}
2322
2323LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002324 IWorkloadFactory& workloadFactory,
2325 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002326 bool useSubtensor)
2327{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002328 return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002329 workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
2330}
2331
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002332LayerTestResult<Half, 3> ConcatFloat16Test(
2333 IWorkloadFactory& workloadFactory,
2334 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Matthew Jackson9bff1442019-09-12 09:08:23 +01002335{
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002336 return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
Matthew Jackson9bff1442019-09-12 09:08:23 +01002337}
2338
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002339LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002340 IWorkloadFactory& workloadFactory,
2341 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002342{
2343 unsigned int outputWidth = 3;
2344 unsigned int outputHeight = 6;
2345 unsigned int outputChannels = 3;
2346
2347 unsigned int inputWidth1 = 3;
2348 unsigned int inputHeight1 = 6;
2349 unsigned int inputChannels1 = 2;
2350
2351 unsigned int inputWidth2 = 3;
2352 unsigned int inputHeight2 = 6;
2353 unsigned int inputChannels2 = 1;
2354
2355 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002356 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
2357 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
2358 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002359
2360 // Quantized input1 tensor. Range [-3, 1]
2361 const float inputScale1 = 0.015686f;
2362 const int32_t inputOffset1 = 192;
2363
2364 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2365 {
2366 1, 2, 3,
2367 4, 5, 6,
2368 7, 8, 9,
2369 10, 11, 12,
2370 13, 14, 15,
2371 16, 17, 18,
2372
2373 19, 20, 21,
2374 22, 23, 24,
2375 25, 26, 27,
2376 28, 29, 30,
2377 31, 32, 33,
2378 34, 35, 36,
2379 })
2380 );
2381
 2382 // Quantized input2 tensor. Range [-1, 4]
2383 const float inputScale2 = 0.019608f;
2384 const int32_t inputOffset2 = 50;
2385
2386 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2387 {
2388 37, 38, 39,
2389 40, 41, 42,
2390 43, 44, 45,
2391 46, 47, 48,
2392 49, 50, 51,
2393 52, 53, 54,
2394 })
2395 );
2396
 2397 // Output has the same quantization parameters as input1,
 2398 // so that only the requantization of input2 is required.
2399 const float outputScale = 0.015686f;
2400 const int32_t outputOffset = 192;
2401
2402 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2403
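    // The first two channel blocks share the output's quantization parameters, so they pass
    // through unchanged; the third block comes from input2 and is requantized,
    // e.g. 37 -> 0.019608f * (37 - 50) / 0.015686f + 192 ~= 176.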
2404 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2405 {
2406 1, 2, 3,
2407 4, 5, 6,
2408 7, 8, 9,
2409 10, 11, 12,
2410 13, 14, 15,
2411 16, 17, 18,
2412
2413 19, 20, 21,
2414 22, 23, 24,
2415 25, 26, 27,
2416 28, 29, 30,
2417 31, 32, 33,
2418 34, 35, 36,
2419
2420 176, 177, 178,
2421 179, 181, 182,
2422 183, 184, 186,
2423 187, 188, 189,
2424 191, 192, 193,
2425 195, 196, 197,
2426 })
2427 );
2428
2429 outputTensorInfo.SetQuantizationScale(outputScale);
2430 outputTensorInfo.SetQuantizationOffset(outputOffset);
2431 inputTensorInfo1.SetQuantizationScale(inputScale1);
2432 inputTensorInfo1.SetQuantizationOffset(inputOffset1);
2433 inputTensorInfo2.SetQuantizationScale(inputScale2);
2434 inputTensorInfo2.SetQuantizationOffset(inputOffset2);
2435
2436 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002437 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002438
2439 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002440 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002441
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002442 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002443
2444 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2445
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002446 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002447 subTensorsSupported ?
2448 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2449 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2450
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002451 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002452 subTensorsSupported ?
2453 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2454 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2455
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002456 ConcatQueueDescriptor data;
2457 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002458 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2459 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2460 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2461
2462 data.m_ViewOrigins.push_back(window1);
2463 data.m_ViewOrigins.push_back(window2);
2464
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002465 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002466
2467 inputHandle1->Allocate();
2468 inputHandle2->Allocate();
2469 outputHandle->Allocate();
2470
2471 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2472 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2473
2474 workload->PostAllocationConfigure();
2475 workload->Execute();
2476
2477 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2478
2479 return ret;
2480}
2481
2482LayerTestResult<uint8_t, 3> ConcatUint8Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002483 IWorkloadFactory& workloadFactory,
2484 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002485{
2486 unsigned int outputWidth = 3;
2487 unsigned int outputHeight = 6;
2488 unsigned int outputChannels = 3;
2489
2490 unsigned int inputWidth1 = 3;
2491 unsigned int inputHeight1 = 6;
2492 unsigned int inputChannels1 = 2;
2493
2494 unsigned int inputWidth2 = 3;
2495 unsigned int inputHeight2 = 6;
2496 unsigned int inputChannels2 = 1;
2497
2498 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002499 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
2500 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
2501 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002502
2503 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2504 const float scale = 0.13497836f;
2505 const int32_t offset = -7;
2506
2507 outputTensorInfo.SetQuantizationScale(scale);
2508 outputTensorInfo.SetQuantizationOffset(offset);
2509 inputTensorInfo1.SetQuantizationScale(scale);
2510 inputTensorInfo1.SetQuantizationOffset(offset);
2511 inputTensorInfo2.SetQuantizationScale(scale);
2512 inputTensorInfo2.SetQuantizationOffset(offset);
2513
2514 LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
2515
2516 ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
2517 {
2518 1, 2, 3,
2519 4, 5, 6,
2520 7, 8, 9,
2521 10, 11, 12,
2522 13, 14, 15,
2523 16, 17, 18,
2524
2525 19, 20, 21,
2526 22, 23, 24,
2527 25, 26, 27,
2528 28, 29, 30,
2529 31, 32, 33,
2530 34, 35, 36,
2531
2532 37, 38, 39,
2533 40, 41, 42,
2534 43, 44, 45,
2535 46, 47, 48,
2536 49, 50, 51,
2537 52, 53, 54,
2538 })
2539 );
2540
2541 auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
2542 {
2543 1, 2, 3,
2544 4, 5, 6,
2545 7, 8, 9,
2546 10, 11, 12,
2547 13, 14, 15,
2548 16, 17, 18,
2549
2550 19, 20, 21,
2551 22, 23, 24,
2552 25, 26, 27,
2553 28, 29, 30,
2554 31, 32, 33,
2555 34, 35, 36,
2556 })
2557 );
2558
2559 auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
2560 {
2561 37, 38, 39,
2562 40, 41, 42,
2563 43, 44, 45,
2564 46, 47, 48,
2565 49, 50, 51,
2566 52, 53, 54,
2567 })
2568 );
2569
2570 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002571 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002572
2573 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002574 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002575
2576
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002577 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002578
2579 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2580
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002581 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002582 subTensorsSupported ?
2583 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2584 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2585
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002586 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002587 subTensorsSupported ?
2588 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2589 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2590
2591
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002592 ConcatQueueDescriptor data;
2593 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002594 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2595 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2596 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2597
2598 data.m_ViewOrigins.push_back(window1);
2599 data.m_ViewOrigins.push_back(window2);
2600
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002601 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002602
2603 inputHandle1->Allocate();
2604 inputHandle2->Allocate();
2605 outputHandle->Allocate();
2606
2607 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2608 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2609
2610 workload->PostAllocationConfigure();
2611 workload->Execute();
2612
2613 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2614
2615 return ret;
2616}
2617
2618LayerTestResult<uint16_t, 3> ConcatUint16Test(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002619 IWorkloadFactory& workloadFactory,
2620 const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002621{
2622 unsigned int outputWidth = 3;
2623 unsigned int outputHeight = 6;
2624 unsigned int outputChannels = 3;
2625
2626 unsigned int inputWidth1 = 3;
2627 unsigned int inputHeight1 = 6;
2628 unsigned int inputChannels1 = 2;
2629
2630 unsigned int inputWidth2 = 3;
2631 unsigned int inputHeight2 = 6;
2632 unsigned int inputChannels2 = 1;
2633
2634 // Defines the tensor descriptors.
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002635 TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
2636 TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
2637 TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002638
2639 // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
2640 const float scale = 0.13497836f;
2641 const int32_t offset = -7;
2642
2643 outputTensorInfo.SetQuantizationScale(scale);
2644 outputTensorInfo.SetQuantizationOffset(offset);
2645 inputTensorInfo1.SetQuantizationScale(scale);
2646 inputTensorInfo1.SetQuantizationOffset(offset);
2647 inputTensorInfo2.SetQuantizationScale(scale);
2648 inputTensorInfo2.SetQuantizationOffset(offset);
2649
2650 LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
2651
2652 ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
2653 {
2654 1, 2, 3,
2655 4, 5, 6,
2656 7, 8, 9,
2657 10, 11, 12,
2658 13, 14, 15,
2659 16, 17, 18,
2660
2661 19, 20, 21,
2662 22, 23, 24,
2663 25, 26, 27,
2664 28, 29, 30,
2665 31, 32, 33,
2666 34, 35, 36,
2667
2668 37, 38, 39,
2669 40, 41, 42,
2670 43, 44, 45,
2671 46, 47, 48,
2672 49, 50, 51,
2673 52, 53, 54,
2674 }));
2675
2676 auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
2677 {
2678 1, 2, 3,
2679 4, 5, 6,
2680 7, 8, 9,
2681 10, 11, 12,
2682 13, 14, 15,
2683 16, 17, 18,
2684
2685 19, 20, 21,
2686 22, 23, 24,
2687 25, 26, 27,
2688 28, 29, 30,
2689 31, 32, 33,
2690 34, 35, 36,
2691 }));
2692
2693 auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
2694 {
2695 37, 38, 39,
2696 40, 41, 42,
2697 43, 44, 45,
2698 46, 47, 48,
2699 49, 50, 51,
2700 52, 53, 54,
2701 }));
2702
2703 std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002704 ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002705
2706 std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002707 ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002708
2709
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002710 std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002711
2712 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
2713
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002714 std::unique_ptr<ITensorHandle> inputHandle1 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002715 subTensorsSupported ?
2716 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
2717 workloadFactory.CreateTensorHandle(inputTensorInfo1);
2718
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002719 std::unique_ptr<ITensorHandle> inputHandle2 =
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002720 subTensorsSupported ?
2721 workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
2722 workloadFactory.CreateTensorHandle(inputTensorInfo2);
2723
2724
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002725 ConcatQueueDescriptor data;
2726 WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002727 AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
2728 AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
2729 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2730
2731 data.m_ViewOrigins.push_back(window1);
2732 data.m_ViewOrigins.push_back(window2);
2733
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002734 std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002735
2736 inputHandle1->Allocate();
2737 inputHandle2->Allocate();
2738 outputHandle->Allocate();
2739
2740 CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
2741 CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
2742
2743 workload->PostAllocationConfigure();
2744 workload->Execute();
2745
2746 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
2747
2748 return ret;
2749}
2750
LayerTestResult<uint8_t, 1> Concat1dUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

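// Concat along the innermost 3D dimension. The useSubtensor flag requests that the inputs be
// created as sub-tensor views of the output where the workload factory supports this.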
LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}

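// 4D concat tests: one per concatenation axis, plus variants in which the input shapes differ.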
LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1);
}

LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
    IWorkloadFactory& workloadFactory,
    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool useSubtensor)
{
    return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}