//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ReductionTestImpl.hpp"

#include <DataTypeUtils.hpp>
#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <TensorHelpers.hpp>

#include <iostream>

16namespace
17{
18
19template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
20LayerTestResult<float, 4> ReductionTestCommon(
21 armnn::IWorkloadFactory& workloadFactory,
22 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
23 const armnn::ITensorHandleFactory& tensorHandleFactory,
24 const armnn::TensorInfo inputTensorInfo,
25 const armnn::TensorInfo outputTensorInfo,
26 const std::vector<float>& inputData,
27 const std::vector<float>& outputData,
28 const std::vector<int32_t> vAxis,
29 const armnn::ReduceOperation reduceOperation,
30 bool keepDims = false)
31{
32 IgnoreUnused(memoryManager);
Sadik Armagan483c8112021-06-01 09:24:52 +010033 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
Sadik Armagana2747482021-02-09 10:28:54 +000034
Sadik Armagan483c8112021-06-01 09:24:52 +010035 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
Sadik Armagana2747482021-02-09 10:28:54 +000036
37 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
38 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
39
40 armnn::ReduceQueueDescriptor descriptor;
41 std::vector<uint32_t> updated_idx;
42 uint32_t resolvedAxis = 0;
43 for (uint32_t i = 0; i < vAxis.size(); ++i)
44 {
45 if (vAxis[i] < 0)
46 {
47 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
48 } else
49 {
50 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
51 }
52
53 updated_idx.push_back(resolvedAxis);
54 }
55
56 descriptor.m_Parameters.m_vAxis = updated_idx;
57 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
58 descriptor.m_Parameters.m_KeepDims = keepDims;
59 armnn::WorkloadInfo info;
60
61 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
62 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
63
64 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
65
66 inputHandle->Allocate();
67 outputHandle->Allocate();
68
Sadik Armagan483c8112021-06-01 09:24:52 +010069 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
Sadik Armagana2747482021-02-09 10:28:54 +000070
71 workload->Execute();
72
Sadik Armagan483c8112021-06-01 09:24:52 +010073 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Sadik Armagana2747482021-02-09 10:28:54 +000074
Sadik Armagan483c8112021-06-01 09:24:52 +010075 return LayerTestResult<float, 4>(actualOutput,
76 outputData,
77 outputHandle->GetShape(),
78 outputTensorInfo.GetShape());
Sadik Armagana2747482021-02-09 10:28:54 +000079}
80
81} // namespace
82
83template<armnn::DataType ArmnnType, typename T>
84LayerTestResult<float, 4> ReduceMaxSimpleTest(
85 armnn::IWorkloadFactory& workloadFactory,
86 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
87 const armnn::ITensorHandleFactory& tensorHandleFactory)
88{
89 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
90 const armnn::TensorShape outputShape{ 1, 1, 1, 3};
91
92 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
93
94 if (armnn::IsQuantizedType<T>())
95 {
96 inputTensorInfo.SetQuantizationScale(1.0f);
97 inputTensorInfo.SetQuantizationOffset(0);
98 }
99
100 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
101
102 std::vector<float> inputValues
103 ({
104 1001.0f, 11.0f, 1003.0f,
105 10.0f, 1002.0f, 12.0f
106 });
107 std::vector<float> outputValues
108 ({
109 1001.0f, 1002.0f, 1003.0f
110 });
111
112 return ReductionTestCommon<ArmnnType>(workloadFactory,
113 memoryManager,
114 tensorHandleFactory,
115 inputTensorInfo,
116 outputTensorInfo,
117 inputValues,
118 outputValues,
119 { 2 },
120 armnn::ReduceOperation::Max);
121}
122
123template<armnn::DataType ArmnnType, typename T>
124LayerTestResult<float, 4> ReduceMaxNegativeAxisTest(
125 armnn::IWorkloadFactory& workloadFactory,
126 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
127 const armnn::ITensorHandleFactory& tensorHandleFactory)
128{
129 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
130 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
131
132 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
133
134 if (armnn::IsQuantizedType<T>())
135 {
136 inputTensorInfo.SetQuantizationScale(1.0f);
137 inputTensorInfo.SetQuantizationOffset(0);
138 }
139
140 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
141
142 std::vector<float> inputValues
143 ({
144 1001.0f, 11.0f, 1003.0f,
145 10.0f, 1002.0f, 12.0f
146 });
147 std::vector<float> outputValues
148 ({
149 1003.0f, 1002.0f
150 });
151
152 return ReductionTestCommon<ArmnnType>(workloadFactory,
153 memoryManager,
154 tensorHandleFactory,
155 inputTensorInfo,
156 outputTensorInfo,
157 inputValues,
158 outputValues,
159 { -1 },
160 armnn::ReduceOperation::Max,
161 true);
162}
163
164template<armnn::DataType ArmnnType, typename T>
165LayerTestResult<float, 4> ReduceMaxSimpleTest2(
166 armnn::IWorkloadFactory& workloadFactory,
167 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
168 const armnn::ITensorHandleFactory& tensorHandleFactory)
169{
170 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
171 const armnn::TensorShape outputShape{ 1, 1, 2, 1 };
172
173 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
174
175 if (armnn::IsQuantizedType<T>())
176 {
177 inputTensorInfo.SetQuantizationScale(1.0f);
178 inputTensorInfo.SetQuantizationOffset(0);
179 }
180
181 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
182
183 std::vector<float> inputValues
184 ({
185 1.0f, 3.0f, 2.0f,
186 6.0f, 4.0f, 5.0f
187 });
188
189 std::vector<float> outputValues
190 ({
191 3.0f, 6.0f
192 });
193
194 return ReductionTestCommon<ArmnnType>(workloadFactory,
195 memoryManager,
196 tensorHandleFactory,
197 inputTensorInfo,
198 outputTensorInfo,
199 inputValues,
200 outputValues,
201 { 3 },
202 armnn::ReduceOperation::Max,
203 true);
204}
205
206template<armnn::DataType ArmnnType, typename T>
207LayerTestResult<float, 4> ReduceMinSimpleTest(
208 armnn::IWorkloadFactory& workloadFactory,
209 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
210 const armnn::ITensorHandleFactory& tensorHandleFactory)
211{
212 const armnn::TensorShape inputShape { 1, 1, 2, 3 };
213 const armnn::TensorShape outputShape { 1, 1, 1, 3};
214
215 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
216
217 if (armnn::IsQuantizedType<T>())
218 {
219 inputTensorInfo.SetQuantizationScale(1.0f);
220 inputTensorInfo.SetQuantizationOffset(0);
221 }
222
223 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
224
225 std::vector<float> inputValues
226 ({
227 1001.0f, 11.0f, 1003.0f,
228 10.0f, 1002.0f, 12.0f
229 });
230 std::vector<float> outputValues
231 ({
232 10.0f, 11.0f, 12.0f
233 });
234
235 return ReductionTestCommon<ArmnnType>(workloadFactory,
236 memoryManager,
237 tensorHandleFactory,
238 inputTensorInfo,
239 outputTensorInfo,
240 inputValues,
241 outputValues,
242 { 2 },
243 armnn::ReduceOperation::Min);
244}
245
246template<armnn::DataType ArmnnType, typename T>
247LayerTestResult<float, 4> ReduceMinNegativeAxisTest(
248 armnn::IWorkloadFactory& workloadFactory,
249 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
250 const armnn::ITensorHandleFactory& tensorHandleFactory)
251{
252 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
253 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
254
255 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
256
257 if (armnn::IsQuantizedType<T>())
258 {
259 inputTensorInfo.SetQuantizationScale(1.0f);
260 inputTensorInfo.SetQuantizationOffset(0);
261 }
262
263 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
264
265 std::vector<float> inputValues
266 ({
267 1001.0f, 11.0f, 1003.0f,
268 10.0f, 1002.0f, 12.0f
269 });
270 std::vector<float> outputValues
271 ({
272 11.0f, 10.0f
273 });
274
275 return ReductionTestCommon<ArmnnType>(workloadFactory,
276 memoryManager,
277 tensorHandleFactory,
278 inputTensorInfo,
279 outputTensorInfo,
280 inputValues,
281 outputValues,
282 { -1 },
283 armnn::ReduceOperation::Min,
284 true);
285}
286
287// Explicit template specializations
288template LayerTestResult<float, 4>
289ReduceMaxSimpleTest<armnn::DataType::Float32>(
290 armnn::IWorkloadFactory& workloadFactory,
291 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
292 const armnn::ITensorHandleFactory& tensorHandleFactory);
293
294template LayerTestResult<float, 4>
295ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
296 armnn::IWorkloadFactory& workloadFactory,
297 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
298 const armnn::ITensorHandleFactory& tensorHandleFactory);
299
300template LayerTestResult<float, 4>
301ReduceMaxSimpleTest2<armnn::DataType::Float32>(
302 armnn::IWorkloadFactory& workloadFactory,
303 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
304 const armnn::ITensorHandleFactory& tensorHandleFactory);
305
306template LayerTestResult<float, 4>
307ReduceMinSimpleTest<armnn::DataType::Float32>(
308 armnn::IWorkloadFactory& workloadFactory,
309 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
310 const armnn::ITensorHandleFactory& tensorHandleFactory);
311
312template LayerTestResult<float, 4>
313ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
314 armnn::IWorkloadFactory& workloadFactory,
315 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
316 const armnn::ITensorHandleFactory& tensorHandleFactory);
317