//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ReduceProdTestImpl.hpp"

#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

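// Common helper: builds a Reduce workload for the requested operation and axes,
// runs it through the given workload factory, and returns the computed and
// expected outputs for comparison.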
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<float, 4> ReduceTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo inputTensorInfo,
    const armnn::TensorInfo outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<float>& outputData,
    const std::vector<int32_t> vAxis,
    const armnn::ReduceOperation reduceOperation,
    bool keepDims = false)
{
    IgnoreUnused(memoryManager);
    auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ReduceQueueDescriptor descriptor;
    std::vector<uint32_t> updated_idx;
    uint32_t resolvedAxis = 0;
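    // Wrap any negative axis indices to their positive equivalents
    // (e.g. -1 refers to the last dimension of the input).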
    for (uint32_t i = 0; i < vAxis.size(); ++i)
    {
        if (vAxis[i] < 0)
        {
            resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
        }
        else
        {
            resolvedAxis = static_cast<uint32_t>(vAxis[i]);
        }

        updated_idx.push_back(resolvedAxis);
    }

    descriptor.m_Parameters.m_vAxis = updated_idx;
    descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
    descriptor.m_Parameters.m_KeepDims = keepDims;
    armnn::WorkloadInfo info;

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

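    // Create the Reduce workload, run it once, then copy the result into actualOutput.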
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
                                                                                descriptor,
                                                                                info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     outputData,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

} // namespace

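// Reduces a { 1, 1, 1, 5 } input along its last axis (passed as -1) to a single product:
// 5 * 2 * 8 * 10 * 9 = 7200.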
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceProdSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
    std::vector<float> outputValues({ 7200.0f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { -1 },
                                       armnn::ReduceOperation::Prod);
}

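// Reduces a { 1, 3, 2, 4 } input along axis 1: each output element is the product of the
// three values that share the same position in the remaining dimensions.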
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceProdSingleAxisTest1(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 2, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
                                     10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f,
                                     100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f, 700.0f, 800.0f
                                   });
    std::vector<float> outputValues({ 1000.0f, 8000.0f, 27000.0f, 64000.0f, 125000.0f, 216000.0f, 343000.0f, 512000.0f
                                    });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1 },
                                       armnn::ReduceOperation::Prod);
}

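// Same axis-1 reduction over a deeper { 1, 6, 3, 4 } input, so each output element is the
// product of six values.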
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceProdSingleAxisTest2(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 3, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 7, 8, 6, 1,
                                     1, 1, 8, 7,
                                     3, 7, 7, 7,

                                     6, 8, 4, 7,
                                     3, 8, 7, 3,
                                     5, 8, 8, 8,


                                     7, 8, 2, 7,
                                     3, 8, 5, 6,
                                     8, 4, 2, 7,

                                     1, 6, 7, 2,
                                     8, 3, 3, 1,
                                     7, 6, 2, 6,


                                     5, 3, 4, 8,
                                     7, 8, 2, 4,
                                     6, 6, 2, 8,

                                     2, 2, 7, 2,
                                     5, 3, 6, 3,
                                     6, 1, 8, 8 });
    std::vector<float> outputValues({ 2940.f, 18432.f, 9408.f, 1568.f,
                                      2520.f, 4608.f, 10080.f, 1512.f,
                                      30240.f, 8064.f, 3584.f, 150528.f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1 },
                                       armnn::ReduceOperation::Prod);
}

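// Reduces along the innermost axis (3) with keepDims = true, so the reduced dimension is
// retained in the output shape with size 1.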
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceProdSingleAxisTest3(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
    const armnn::TensorShape outputShape{ 1, 6, 3, 1 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 7, 8, 6, 1,
                                     1, 1, 8, 7,
                                     3, 7, 7, 7,

                                     6, 8, 4, 7,
                                     3, 8, 7, 3,
                                     5, 8, 8, 8,


                                     7, 8, 2, 7,
                                     3, 8, 5, 6,
                                     8, 4, 2, 7,

                                     1, 6, 7, 2,
                                     8, 3, 3, 1,
                                     7, 6, 2, 6,


                                     5, 3, 4, 8,
                                     7, 8, 2, 4,
                                     6, 6, 2, 8,

                                     2, 2, 7, 2,
                                     5, 3, 6, 3,
                                     6, 1, 8, 8 });
    std::vector<float> outputValues({ 336.f, 56.f, 1029.f,
                                      1344.f, 504.f, 2560.f,

                                      784.f, 720.f, 448.f,
                                      84.f, 72.f, 504.f,

                                      480.f, 448.f, 576.f,
                                      56.f, 270.f, 384.f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 3 },
                                       armnn::ReduceOperation::Prod,
                                       true);
}

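// Reduces over axes 1 and 2 together, collapsing both dimensions to size 1 in the output.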
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<float, 4> ReduceProdMultipleAxisTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
    const armnn::TensorShape outputShape{ 1, 1, 1, 4 };

    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(1.0f);
        inputTensorInfo.SetQuantizationOffset(0);
    }

    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);

    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
                                     5.0f, 6.0f, 7.0f, 8.0f,

                                     10.0f, 20.0f, 30.0f, 40.0f,
                                     50.0f, 60.0f, 70.0f, 80.0f,

                                     11.0f, 22.0f, 33.0f, 44.0f,
                                     55.0f, 66.0f, 77.0f, 88.0f });
    std::vector<float> outputValues({ 1512500.f, 20908800.f, 112058100.f, 396492800.f });

    return ReduceTestCommon<ArmnnType>(workloadFactory,
                                       memoryManager,
                                       tensorHandleFactory,
                                       inputTensorInfo,
                                       outputTensorInfo,
                                       inputValues,
                                       outputValues,
                                       { 1, 2 },
                                       armnn::ReduceOperation::Prod);
}

// Explicit template instantiations

template LayerTestResult<float, 4>
ReduceProdSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceProdSingleAxisTest1<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceProdSingleAxisTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceProdSingleAxisTest3<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceProdMultipleAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);