blob: b4be4f1396b55b111dae5f3b788b90c35bdbd046 [file] [log] [blame]
Sadik Armagana2747482021-02-09 10:28:54 +00001//
2// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "ReductionTestImpl.hpp"
7
Sadik Armagana097d2a2021-11-24 15:47:28 +00008#include <DataTypeUtils.hpp>
9#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000010#include <armnnTestUtils/WorkloadTestUtils.hpp>
Sadik Armagana2747482021-02-09 10:28:54 +000011
Colm Donelanc42a9872022-02-02 16:35:09 +000012#include <armnnTestUtils/TensorHelpers.hpp>
Sadik Armagana2747482021-02-09 10:28:54 +000013
14#include <iostream>
15
16namespace
17{
18
19template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
20LayerTestResult<float, 4> ReductionTestCommon(
21 armnn::IWorkloadFactory& workloadFactory,
22 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
23 const armnn::ITensorHandleFactory& tensorHandleFactory,
24 const armnn::TensorInfo inputTensorInfo,
25 const armnn::TensorInfo outputTensorInfo,
26 const std::vector<float>& inputData,
27 const std::vector<float>& outputData,
28 const std::vector<int32_t> vAxis,
29 const armnn::ReduceOperation reduceOperation,
30 bool keepDims = false)
31{
32 IgnoreUnused(memoryManager);
Sadik Armagan483c8112021-06-01 09:24:52 +010033 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
Sadik Armagana2747482021-02-09 10:28:54 +000034
Sadik Armagan483c8112021-06-01 09:24:52 +010035 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
Sadik Armagana2747482021-02-09 10:28:54 +000036
37 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
38 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
39
40 armnn::ReduceQueueDescriptor descriptor;
41 std::vector<uint32_t> updated_idx;
42 uint32_t resolvedAxis = 0;
43 for (uint32_t i = 0; i < vAxis.size(); ++i)
44 {
45 if (vAxis[i] < 0)
46 {
47 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
48 } else
49 {
50 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
51 }
52
53 updated_idx.push_back(resolvedAxis);
54 }
55
56 descriptor.m_Parameters.m_vAxis = updated_idx;
57 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
58 descriptor.m_Parameters.m_KeepDims = keepDims;
59 armnn::WorkloadInfo info;
60
61 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
62 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
63
Teresa Charlin611c7fb2022-01-07 09:47:29 +000064 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
65 descriptor,
66 info);
Sadik Armagana2747482021-02-09 10:28:54 +000067
68 inputHandle->Allocate();
69 outputHandle->Allocate();
70
Sadik Armagan483c8112021-06-01 09:24:52 +010071 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
Sadik Armagana2747482021-02-09 10:28:54 +000072
73 workload->Execute();
74
Sadik Armagan483c8112021-06-01 09:24:52 +010075 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Sadik Armagana2747482021-02-09 10:28:54 +000076
Sadik Armagan483c8112021-06-01 09:24:52 +010077 return LayerTestResult<float, 4>(actualOutput,
78 outputData,
79 outputHandle->GetShape(),
80 outputTensorInfo.GetShape());
Sadik Armagana2747482021-02-09 10:28:54 +000081}
82
83} // namespace
84
85template<armnn::DataType ArmnnType, typename T>
86LayerTestResult<float, 4> ReduceMaxSimpleTest(
87 armnn::IWorkloadFactory& workloadFactory,
88 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
89 const armnn::ITensorHandleFactory& tensorHandleFactory)
90{
91 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
92 const armnn::TensorShape outputShape{ 1, 1, 1, 3};
93
94 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
95
96 if (armnn::IsQuantizedType<T>())
97 {
98 inputTensorInfo.SetQuantizationScale(1.0f);
99 inputTensorInfo.SetQuantizationOffset(0);
100 }
101
102 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
103
104 std::vector<float> inputValues
105 ({
106 1001.0f, 11.0f, 1003.0f,
107 10.0f, 1002.0f, 12.0f
108 });
109 std::vector<float> outputValues
110 ({
111 1001.0f, 1002.0f, 1003.0f
112 });
113
114 return ReductionTestCommon<ArmnnType>(workloadFactory,
115 memoryManager,
116 tensorHandleFactory,
117 inputTensorInfo,
118 outputTensorInfo,
119 inputValues,
120 outputValues,
121 { 2 },
122 armnn::ReduceOperation::Max);
123}
124
125template<armnn::DataType ArmnnType, typename T>
126LayerTestResult<float, 4> ReduceMaxNegativeAxisTest(
127 armnn::IWorkloadFactory& workloadFactory,
128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
129 const armnn::ITensorHandleFactory& tensorHandleFactory)
130{
131 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
132 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
133
134 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
135
136 if (armnn::IsQuantizedType<T>())
137 {
138 inputTensorInfo.SetQuantizationScale(1.0f);
139 inputTensorInfo.SetQuantizationOffset(0);
140 }
141
142 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
143
144 std::vector<float> inputValues
145 ({
146 1001.0f, 11.0f, 1003.0f,
147 10.0f, 1002.0f, 12.0f
148 });
149 std::vector<float> outputValues
150 ({
151 1003.0f, 1002.0f
152 });
153
154 return ReductionTestCommon<ArmnnType>(workloadFactory,
155 memoryManager,
156 tensorHandleFactory,
157 inputTensorInfo,
158 outputTensorInfo,
159 inputValues,
160 outputValues,
161 { -1 },
162 armnn::ReduceOperation::Max,
163 true);
164}
165
166template<armnn::DataType ArmnnType, typename T>
167LayerTestResult<float, 4> ReduceMaxSimpleTest2(
168 armnn::IWorkloadFactory& workloadFactory,
169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
170 const armnn::ITensorHandleFactory& tensorHandleFactory)
171{
172 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
173 const armnn::TensorShape outputShape{ 1, 1, 2, 1 };
174
175 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
176
177 if (armnn::IsQuantizedType<T>())
178 {
179 inputTensorInfo.SetQuantizationScale(1.0f);
180 inputTensorInfo.SetQuantizationOffset(0);
181 }
182
183 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
184
185 std::vector<float> inputValues
186 ({
187 1.0f, 3.0f, 2.0f,
188 6.0f, 4.0f, 5.0f
189 });
190
191 std::vector<float> outputValues
192 ({
193 3.0f, 6.0f
194 });
195
196 return ReductionTestCommon<ArmnnType>(workloadFactory,
197 memoryManager,
198 tensorHandleFactory,
199 inputTensorInfo,
200 outputTensorInfo,
201 inputValues,
202 outputValues,
203 { 3 },
204 armnn::ReduceOperation::Max,
205 true);
206}
207
208template<armnn::DataType ArmnnType, typename T>
209LayerTestResult<float, 4> ReduceMinSimpleTest(
210 armnn::IWorkloadFactory& workloadFactory,
211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
212 const armnn::ITensorHandleFactory& tensorHandleFactory)
213{
214 const armnn::TensorShape inputShape { 1, 1, 2, 3 };
215 const armnn::TensorShape outputShape { 1, 1, 1, 3};
216
217 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
218
219 if (armnn::IsQuantizedType<T>())
220 {
221 inputTensorInfo.SetQuantizationScale(1.0f);
222 inputTensorInfo.SetQuantizationOffset(0);
223 }
224
225 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
226
227 std::vector<float> inputValues
228 ({
229 1001.0f, 11.0f, 1003.0f,
230 10.0f, 1002.0f, 12.0f
231 });
232 std::vector<float> outputValues
233 ({
234 10.0f, 11.0f, 12.0f
235 });
236
237 return ReductionTestCommon<ArmnnType>(workloadFactory,
238 memoryManager,
239 tensorHandleFactory,
240 inputTensorInfo,
241 outputTensorInfo,
242 inputValues,
243 outputValues,
244 { 2 },
245 armnn::ReduceOperation::Min);
246}
247
248template<armnn::DataType ArmnnType, typename T>
249LayerTestResult<float, 4> ReduceMinNegativeAxisTest(
250 armnn::IWorkloadFactory& workloadFactory,
251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
252 const armnn::ITensorHandleFactory& tensorHandleFactory)
253{
254 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
255 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
256
257 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
258
259 if (armnn::IsQuantizedType<T>())
260 {
261 inputTensorInfo.SetQuantizationScale(1.0f);
262 inputTensorInfo.SetQuantizationOffset(0);
263 }
264
265 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
266
267 std::vector<float> inputValues
268 ({
269 1001.0f, 11.0f, 1003.0f,
270 10.0f, 1002.0f, 12.0f
271 });
272 std::vector<float> outputValues
273 ({
274 11.0f, 10.0f
275 });
276
277 return ReductionTestCommon<ArmnnType>(workloadFactory,
278 memoryManager,
279 tensorHandleFactory,
280 inputTensorInfo,
281 outputTensorInfo,
282 inputValues,
283 outputValues,
284 { -1 },
285 armnn::ReduceOperation::Min,
286 true);
287}
288
// Explicit template instantiations.
// The templates above live in this .cpp, so every (function, DataType)
// combination used by the backend unit tests must be instantiated here;
// currently only Float32 variants are required.

template LayerTestResult<float, 4>
ReduceMaxSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMaxSimpleTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMinSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);
319