//
// Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ReduceSumTestImpl.hpp"

#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

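// Shared helper for the ReduceSum layer tests below. It resolves the requested axes,
// builds and runs a Reduce workload on the backend under test, and returns the computed
// output alongside the expected reference values for comparison.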
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<float, 4> ReduceTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<float>& outputData,
    const std::vector<int32_t> vAxis,
    const armnn::ReduceOperation reduceOperation,
    bool keepDims = false)
{
    IgnoreUnused(memoryManager);
    auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ReduceQueueDescriptor descriptor;

    // Resolve any negative axis indices (counted from the last dimension) to absolute dimension indices.
    std::vector<uint32_t> updated_idx;
    uint32_t resolvedAxis = 0;
    for (uint32_t i = 0; i < vAxis.size(); ++i)
    {
        if (vAxis[i] < 0)
        {
            resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
        }
        else
        {
            resolvedAxis = static_cast<uint32_t>(vAxis[i]);
        }

        updated_idx.push_back(resolvedAxis);
    }

    descriptor.m_Parameters.m_vAxis = updated_idx;
    descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
    descriptor.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    // Create the Reduce workload for the backend under test, run it, and read back the result.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     outputData,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

} // namespace

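// Sums a 1x1x1x5 tensor over its last axis (passed as -1): 5 + 2 + 8 + 10 + 9 = 34.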
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceSumSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<float> outputValues({ 34.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { -1 },
                                       armnn::ReduceOperation::Sum);
}

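// Sums a 1x3x2x4 tensor over axis 1, collapsing it to 1x1x2x4;
// for example, output[0] = 1 + 10 + 100 = 111.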
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceSumSingleAxisTest1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
                                      555.0f, 666.0f, 777.0f, 888.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1 },
                                       armnn::ReduceOperation::Sum);
}

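// Sums a 1x6x3x4 tensor over axis 1, collapsing it to 1x1x3x4;
// for example, output[0] = 7 + 6 + 7 + 1 + 5 + 2 = 28.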
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceSumSingleAxisTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 7, 8, 6, 1,
                                     1, 1, 8, 7,
                                     3, 7, 7, 7,

                                     6, 8, 4, 7,
                                     3, 8, 7, 3,
                                     5, 8, 8, 8,


                                     7, 8, 2, 7,
                                     3, 8, 5, 6,
                                     8, 4, 2, 7,

                                     1, 6, 7, 2,
                                     8, 3, 3, 1,
                                     7, 6, 2, 6,


                                     5, 3, 4, 8,
                                     7, 8, 2, 4,
                                     6, 6, 2, 8,

                                     2, 2, 7, 2,
                                     5, 3, 6, 3,
                                     6, 1, 8, 8 });
    std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
                                      27.0f, 31.0f, 31.0f, 24.0f,
                                      35.0f, 32.0f, 29.0f, 44.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1 },
                                       armnn::ReduceOperation::Sum);
}

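// Sums a 1x6x3x4 tensor over its last axis with keepDims enabled, so the reduced
// dimension is retained and the output shape is 1x6x3x1; e.g. output[0] = 7 + 8 + 6 + 1 = 22.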
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceSumSingleAxisTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
    const armnn::TensorShape outputShape{ 1, 6, 3, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 7, 8, 6, 1,
                                     1, 1, 8, 7,
                                     3, 7, 7, 7,

                                     6, 8, 4, 7,
                                     3, 8, 7, 3,
                                     5, 8, 8, 8,


                                     7, 8, 2, 7,
                                     3, 8, 5, 6,
                                     8, 4, 2, 7,

                                     1, 6, 7, 2,
                                     8, 3, 3, 1,
                                     7, 6, 2, 6,


                                     5, 3, 4, 8,
                                     7, 8, 2, 4,
                                     6, 6, 2, 8,

                                     2, 2, 7, 2,
                                     5, 3, 6, 3,
                                     6, 1, 8, 8 });
    std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
                                      25.0f, 21.0f, 29.0f,

                                      24.0f, 22.0f, 21.0f,
                                      16.0f, 15.0f, 21.0f,

                                      20.0f, 21.0f, 22.0f,
                                      13.0f, 17.0f, 23.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 3 },
                                       armnn::ReduceOperation::Sum,
                                       true);
}

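// Sums a 1x3x2x4 tensor over axes 1 and 2 at once, collapsing it to 1x1x1x4;
// for example, output[0] = 1 + 5 + 10 + 50 + 100 + 500 = 666.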
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceSumMultipleAxisTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 1, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     100.0f, 200.0f, 300.0f, 400.0f,
                                     500.0f, 600.0f, 700.0f, 800.0f });
    std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1, 2 },
                                       armnn::ReduceOperation::Sum);
}

// Explicit template specializations

template LayerTestResult<float, 4>
ReduceSumSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);