//
// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#include "ReduceSumTestImpl.hpp"
7
8#include <backendsCommon/test/DataTypeUtils.hpp>
9#include <backendsCommon/test/TensorCopyUtils.hpp>
10#include <backendsCommon/test/WorkloadTestUtils.hpp>
11
12#include <test/TensorHelpers.hpp>
13
14namespace
15{
16
17template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
18LayerTestResult<float, 4> ReduceTestCommon(
19 armnn::IWorkloadFactory& workloadFactory,
20 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
21 const armnn::ITensorHandleFactory& tensorHandleFactory,
22 const armnn::TensorInfo inputTensorInfo,
23 const armnn::TensorInfo outputTensorInfo,
24 const std::vector<float>& inputData,
25 const std::vector<float>& outputData,
26 const std::vector<int32_t> vAxis,
Sadik Armagana2747482021-02-09 10:28:54 +000027 const armnn::ReduceOperation reduceOperation,
28 bool keepDims = false)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000029{
30 IgnoreUnused(memoryManager);
31 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
32
33 LayerTestResult<float, 4> result(outputTensorInfo);
34 result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
35
36 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
37 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
38
39 armnn::ReduceQueueDescriptor descriptor;
40 std::vector<uint32_t> updated_idx;
41 uint32_t resolvedAxis = 0;
42 for (uint32_t i = 0; i < vAxis.size(); ++i)
43 {
44 if (vAxis[i] < 0)
45 {
46 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
47 } else
48 {
49 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
50 }
51
52 updated_idx.push_back(resolvedAxis);
53 }
54
55 descriptor.m_Parameters.m_vAxis = updated_idx;
56 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
Sadik Armagana2747482021-02-09 10:28:54 +000057 descriptor.m_Parameters.m_KeepDims = keepDims;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000058 armnn::WorkloadInfo info;
59
60 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
61 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
62
63 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
64
65 inputHandle->Allocate();
66 outputHandle->Allocate();
67
68 CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
69
70 workload->Execute();
71
72 CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
73
74 return result;
75}
76
77} // namespace
78
79template<armnn::DataType ArmnnType, typename T>
80LayerTestResult<float, 4> ReduceSumSimpleTest(
81 armnn::IWorkloadFactory& workloadFactory,
82 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
83 const armnn::ITensorHandleFactory& tensorHandleFactory)
84{
85 const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
86 const armnn::TensorShape outputShape{ 1, 1, 1, 1};
87
88 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
89
90 if (armnn::IsQuantizedType<T>())
91 {
92 inputTensorInfo.SetQuantizationScale(1.0f);
93 inputTensorInfo.SetQuantizationOffset(0);
94 }
95
96 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
97
98 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
99 std::vector<float> outputValues({ 34.0f });
100
101 return ReduceTestCommon<ArmnnType>(workloadFactory,
102 memoryManager,
103 tensorHandleFactory,
104 inputTensorInfo,
105 outputTensorInfo,
106 inputValues,
107 outputValues,
108 { -1 },
109 armnn::ReduceOperation::Sum);
110}
111
112template<armnn::DataType ArmnnType, typename T>
113LayerTestResult<float, 4> ReduceSumSingleAxisTest1(
114 armnn::IWorkloadFactory& workloadFactory,
115 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
116 const armnn::ITensorHandleFactory& tensorHandleFactory)
117{
118 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
119 const armnn::TensorShape outputShape{ 1, 1, 2, 4};
120
121 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
122
123 if (armnn::IsQuantizedType<T>())
124 {
125 inputTensorInfo.SetQuantizationScale(1.0f);
126 inputTensorInfo.SetQuantizationOffset(0);
127 }
128
129 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
130
131 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
132 5.0f, 6.0f, 7.0f, 8.0f,
133
134 10.0f, 20.0f, 30.0f, 40.0f,
135 50.0f, 60.0f, 70.0f, 80.0f,
136
137 100.0f, 200.0f, 300.0f, 400.0f,
138 500.0f, 600.0f, 700.0f, 800.0f });
139 std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
140 555.0f, 666.0f, 777.0f, 888.0f });
141
142 return ReduceTestCommon<ArmnnType>(workloadFactory,
143 memoryManager,
144 tensorHandleFactory,
145 inputTensorInfo,
146 outputTensorInfo,
147 inputValues,
148 outputValues,
149 { 1 },
150 armnn::ReduceOperation::Sum);
151}
152
153template<armnn::DataType ArmnnType, typename T>
154LayerTestResult<float, 4> ReduceSumSingleAxisTest2(
155 armnn::IWorkloadFactory& workloadFactory,
156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
157 const armnn::ITensorHandleFactory& tensorHandleFactory)
158{
159 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
160 const armnn::TensorShape outputShape{ 1, 1, 3, 4};
161
162 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
163
164 if (armnn::IsQuantizedType<T>())
165 {
166 inputTensorInfo.SetQuantizationScale(1.0f);
167 inputTensorInfo.SetQuantizationOffset(0);
168 }
169
170 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
171
172 std::vector<float> inputValues( {7, 8, 6, 1,
173 1, 1, 8, 7,
174 3, 7, 7, 7,
175
176 6, 8, 4, 7,
177 3, 8, 7, 3,
178 5, 8, 8, 8,
179
180
181 7, 8, 2, 7,
182 3, 8, 5, 6,
183 8, 4, 2, 7,
184
185 1, 6, 7, 2,
186 8, 3, 3, 1,
187 7, 6, 2, 6,
188
189
190 5, 3, 4, 8,
191 7, 8, 2, 4,
192 6, 6, 2, 8,
193
194 2, 2, 7, 2,
195 5, 3, 6, 3,
196 6, 1, 8, 8});
197 std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
198 27.0f, 31.0f, 31.0f, 24.0f,
199 35.0f, 32.0f, 29.0f, 44.0f});
200
201 return ReduceTestCommon<ArmnnType>(workloadFactory,
202 memoryManager,
203 tensorHandleFactory,
204 inputTensorInfo,
205 outputTensorInfo,
206 inputValues,
207 outputValues,
208 { 1 },
209 armnn::ReduceOperation::Sum);
210}
211
212template<armnn::DataType ArmnnType, typename T>
213LayerTestResult<float, 4> ReduceSumSingleAxisTest3(
214 armnn::IWorkloadFactory& workloadFactory,
215 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
216 const armnn::ITensorHandleFactory& tensorHandleFactory)
217{
218 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
219 const armnn::TensorShape outputShape{ 1, 6, 3, 1};
220
221 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
222
223 if (armnn::IsQuantizedType<T>())
224 {
225 inputTensorInfo.SetQuantizationScale(1.0f);
226 inputTensorInfo.SetQuantizationOffset(0);
227 }
228
229 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
230
231 std::vector<float> inputValues( {7, 8, 6, 1,
232 1, 1, 8, 7,
233 3, 7, 7, 7,
234
235 6, 8, 4, 7,
236 3, 8, 7, 3,
237 5, 8, 8, 8,
238
239
240 7, 8, 2, 7,
241 3, 8, 5, 6,
242 8, 4, 2, 7,
243
244 1, 6, 7, 2,
245 8, 3, 3, 1,
246 7, 6, 2, 6,
247
248
249 5, 3, 4, 8,
250 7, 8, 2, 4,
251 6, 6, 2, 8,
252
253 2, 2, 7, 2,
254 5, 3, 6, 3,
255 6, 1, 8, 8});
256 std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
257 25.0f, 21.0f, 29.0f,
258
259 24.0f, 22.0f, 21.0f,
260 16.0f, 15.0f, 21.0f,
261
262 20.0f, 21.0f, 22.0f,
263 13.0f, 17.0f, 23.0f});
264
265 return ReduceTestCommon<ArmnnType>(workloadFactory,
266 memoryManager,
267 tensorHandleFactory,
268 inputTensorInfo,
269 outputTensorInfo,
270 inputValues,
271 outputValues,
272 { 3 },
Sadik Armagana2747482021-02-09 10:28:54 +0000273 armnn::ReduceOperation::Sum,
274 true);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +0000275}
276
277template<armnn::DataType ArmnnType, typename T>
278LayerTestResult<float, 4> ReduceSumMultipleAxisTest(
279 armnn::IWorkloadFactory& workloadFactory,
280 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
281 const armnn::ITensorHandleFactory& tensorHandleFactory)
282{
283 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
284 const armnn::TensorShape outputShape{ 1, 1, 1, 4};
285
286 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
287
288 if (armnn::IsQuantizedType<T>())
289 {
290 inputTensorInfo.SetQuantizationScale(1.0f);
291 inputTensorInfo.SetQuantizationOffset(0);
292 }
293
294 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
295
296 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
297 5.0f, 6.0f, 7.0f, 8.0f,
298
299 10.0f, 20.0f, 30.0f, 40.0f,
300 50.0f, 60.0f, 70.0f, 80.0f,
301
302 100.0f, 200.0f, 300.0f, 400.0f,
303 500.0f, 600.0f, 700.0f, 800.0f });
304 std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
305
306 return ReduceTestCommon<ArmnnType>(workloadFactory,
307 memoryManager,
308 tensorHandleFactory,
309 inputTensorInfo,
310 outputTensorInfo,
311 inputValues,
312 outputValues,
313 { 1, 2 },
314 armnn::ReduceOperation::Sum);
315}
316
// Explicit template instantiations
//
// Each test template above is explicitly instantiated for Float32 here so
// that the definitions live in this translation unit and can be linked from
// the per-backend unit tests that declare them in ReduceSumTestImpl.hpp.

template LayerTestResult<float, 4>
ReduceSumSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);