blob: acb2990c98db5411025f88d44ea5b54669e3fa29 [file] [log] [blame]
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +00001//
2// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "ReduceSumTestImpl.hpp"
7
Sadik Armagana097d2a2021-11-24 15:47:28 +00008#include <DataTypeUtils.hpp>
9#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000010#include <armnnTestUtils/WorkloadTestUtils.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000011
Sadik Armagana097d2a2021-11-24 15:47:28 +000012#include <TensorHelpers.hpp>
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000013
14namespace
15{
16
17template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
18LayerTestResult<float, 4> ReduceTestCommon(
19 armnn::IWorkloadFactory& workloadFactory,
20 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
21 const armnn::ITensorHandleFactory& tensorHandleFactory,
22 const armnn::TensorInfo inputTensorInfo,
23 const armnn::TensorInfo outputTensorInfo,
24 const std::vector<float>& inputData,
25 const std::vector<float>& outputData,
26 const std::vector<int32_t> vAxis,
Sadik Armagana2747482021-02-09 10:28:54 +000027 const armnn::ReduceOperation reduceOperation,
28 bool keepDims = false)
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000029{
30 IgnoreUnused(memoryManager);
Sadik Armagan483c8112021-06-01 09:24:52 +010031 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000032
Sadik Armagan483c8112021-06-01 09:24:52 +010033 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000034
35 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
36 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
37
38 armnn::ReduceQueueDescriptor descriptor;
39 std::vector<uint32_t> updated_idx;
40 uint32_t resolvedAxis = 0;
41 for (uint32_t i = 0; i < vAxis.size(); ++i)
42 {
43 if (vAxis[i] < 0)
44 {
45 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
46 } else
47 {
48 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
49 }
50
51 updated_idx.push_back(resolvedAxis);
52 }
53
54 descriptor.m_Parameters.m_vAxis = updated_idx;
55 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
Sadik Armagana2747482021-02-09 10:28:54 +000056 descriptor.m_Parameters.m_KeepDims = keepDims;
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000057 armnn::WorkloadInfo info;
58
59 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
60 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
61
62 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
63
64 inputHandle->Allocate();
65 outputHandle->Allocate();
66
Sadik Armagan483c8112021-06-01 09:24:52 +010067 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000068
69 workload->Execute();
70
Sadik Armagan483c8112021-06-01 09:24:52 +010071 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000072
Sadik Armagan483c8112021-06-01 09:24:52 +010073 return LayerTestResult<float, 4>(actualOutput,
74 outputData,
75 outputHandle->GetShape(),
76 outputTensorInfo.GetShape());
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +000077}
78
79} // namespace
80
81template<armnn::DataType ArmnnType, typename T>
82LayerTestResult<float, 4> ReduceSumSimpleTest(
83 armnn::IWorkloadFactory& workloadFactory,
84 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
85 const armnn::ITensorHandleFactory& tensorHandleFactory)
86{
87 const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
88 const armnn::TensorShape outputShape{ 1, 1, 1, 1};
89
90 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
91
92 if (armnn::IsQuantizedType<T>())
93 {
94 inputTensorInfo.SetQuantizationScale(1.0f);
95 inputTensorInfo.SetQuantizationOffset(0);
96 }
97
98 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
99
100 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
101 std::vector<float> outputValues({ 34.0f });
102
103 return ReduceTestCommon<ArmnnType>(workloadFactory,
104 memoryManager,
105 tensorHandleFactory,
106 inputTensorInfo,
107 outputTensorInfo,
108 inputValues,
109 outputValues,
110 { -1 },
111 armnn::ReduceOperation::Sum);
112}
113
114template<armnn::DataType ArmnnType, typename T>
115LayerTestResult<float, 4> ReduceSumSingleAxisTest1(
116 armnn::IWorkloadFactory& workloadFactory,
117 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
118 const armnn::ITensorHandleFactory& tensorHandleFactory)
119{
120 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
121 const armnn::TensorShape outputShape{ 1, 1, 2, 4};
122
123 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
124
125 if (armnn::IsQuantizedType<T>())
126 {
127 inputTensorInfo.SetQuantizationScale(1.0f);
128 inputTensorInfo.SetQuantizationOffset(0);
129 }
130
131 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
132
133 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
134 5.0f, 6.0f, 7.0f, 8.0f,
135
136 10.0f, 20.0f, 30.0f, 40.0f,
137 50.0f, 60.0f, 70.0f, 80.0f,
138
139 100.0f, 200.0f, 300.0f, 400.0f,
140 500.0f, 600.0f, 700.0f, 800.0f });
141 std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
142 555.0f, 666.0f, 777.0f, 888.0f });
143
144 return ReduceTestCommon<ArmnnType>(workloadFactory,
145 memoryManager,
146 tensorHandleFactory,
147 inputTensorInfo,
148 outputTensorInfo,
149 inputValues,
150 outputValues,
151 { 1 },
152 armnn::ReduceOperation::Sum);
153}
154
155template<armnn::DataType ArmnnType, typename T>
156LayerTestResult<float, 4> ReduceSumSingleAxisTest2(
157 armnn::IWorkloadFactory& workloadFactory,
158 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
159 const armnn::ITensorHandleFactory& tensorHandleFactory)
160{
161 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
162 const armnn::TensorShape outputShape{ 1, 1, 3, 4};
163
164 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
165
166 if (armnn::IsQuantizedType<T>())
167 {
168 inputTensorInfo.SetQuantizationScale(1.0f);
169 inputTensorInfo.SetQuantizationOffset(0);
170 }
171
172 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
173
174 std::vector<float> inputValues( {7, 8, 6, 1,
175 1, 1, 8, 7,
176 3, 7, 7, 7,
177
178 6, 8, 4, 7,
179 3, 8, 7, 3,
180 5, 8, 8, 8,
181
182
183 7, 8, 2, 7,
184 3, 8, 5, 6,
185 8, 4, 2, 7,
186
187 1, 6, 7, 2,
188 8, 3, 3, 1,
189 7, 6, 2, 6,
190
191
192 5, 3, 4, 8,
193 7, 8, 2, 4,
194 6, 6, 2, 8,
195
196 2, 2, 7, 2,
197 5, 3, 6, 3,
198 6, 1, 8, 8});
199 std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
200 27.0f, 31.0f, 31.0f, 24.0f,
201 35.0f, 32.0f, 29.0f, 44.0f});
202
203 return ReduceTestCommon<ArmnnType>(workloadFactory,
204 memoryManager,
205 tensorHandleFactory,
206 inputTensorInfo,
207 outputTensorInfo,
208 inputValues,
209 outputValues,
210 { 1 },
211 armnn::ReduceOperation::Sum);
212}
213
214template<armnn::DataType ArmnnType, typename T>
215LayerTestResult<float, 4> ReduceSumSingleAxisTest3(
216 armnn::IWorkloadFactory& workloadFactory,
217 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
218 const armnn::ITensorHandleFactory& tensorHandleFactory)
219{
220 const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
221 const armnn::TensorShape outputShape{ 1, 6, 3, 1};
222
223 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
224
225 if (armnn::IsQuantizedType<T>())
226 {
227 inputTensorInfo.SetQuantizationScale(1.0f);
228 inputTensorInfo.SetQuantizationOffset(0);
229 }
230
231 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
232
233 std::vector<float> inputValues( {7, 8, 6, 1,
234 1, 1, 8, 7,
235 3, 7, 7, 7,
236
237 6, 8, 4, 7,
238 3, 8, 7, 3,
239 5, 8, 8, 8,
240
241
242 7, 8, 2, 7,
243 3, 8, 5, 6,
244 8, 4, 2, 7,
245
246 1, 6, 7, 2,
247 8, 3, 3, 1,
248 7, 6, 2, 6,
249
250
251 5, 3, 4, 8,
252 7, 8, 2, 4,
253 6, 6, 2, 8,
254
255 2, 2, 7, 2,
256 5, 3, 6, 3,
257 6, 1, 8, 8});
258 std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
259 25.0f, 21.0f, 29.0f,
260
261 24.0f, 22.0f, 21.0f,
262 16.0f, 15.0f, 21.0f,
263
264 20.0f, 21.0f, 22.0f,
265 13.0f, 17.0f, 23.0f});
266
267 return ReduceTestCommon<ArmnnType>(workloadFactory,
268 memoryManager,
269 tensorHandleFactory,
270 inputTensorInfo,
271 outputTensorInfo,
272 inputValues,
273 outputValues,
274 { 3 },
Sadik Armagana2747482021-02-09 10:28:54 +0000275 armnn::ReduceOperation::Sum,
276 true);
Sadik Armagan0c3ea5b2021-02-03 09:29:30 +0000277}
278
279template<armnn::DataType ArmnnType, typename T>
280LayerTestResult<float, 4> ReduceSumMultipleAxisTest(
281 armnn::IWorkloadFactory& workloadFactory,
282 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
283 const armnn::ITensorHandleFactory& tensorHandleFactory)
284{
285 const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
286 const armnn::TensorShape outputShape{ 1, 1, 1, 4};
287
288 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
289
290 if (armnn::IsQuantizedType<T>())
291 {
292 inputTensorInfo.SetQuantizationScale(1.0f);
293 inputTensorInfo.SetQuantizationOffset(0);
294 }
295
296 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
297
298 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
299 5.0f, 6.0f, 7.0f, 8.0f,
300
301 10.0f, 20.0f, 30.0f, 40.0f,
302 50.0f, 60.0f, 70.0f, 80.0f,
303
304 100.0f, 200.0f, 300.0f, 400.0f,
305 500.0f, 600.0f, 700.0f, 800.0f });
306 std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
307
308 return ReduceTestCommon<ArmnnType>(workloadFactory,
309 memoryManager,
310 tensorHandleFactory,
311 inputTensorInfo,
312 outputTensorInfo,
313 inputValues,
314 outputValues,
315 { 1, 2 },
316 armnn::ReduceOperation::Sum);
317}
318
// Explicit template specializations
// Each test above is instantiated here for Float32 so the definitions in this
// translation unit are emitted and can be linked by the backend test suites.

template LayerTestResult<float, 4>
ReduceSumSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);