blob: e88356ce21bca39b38ef81cb7c57238934b91f10 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007#include "WorkloadTestUtils.hpp"
8
telsoa014fcda012018-03-09 14:13:49 +00009#include <armnn/ArmNN.hpp>
10#include <armnn/Tensor.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000013#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000014#include <backendsCommon/WorkloadFactory.hpp>
15#include <backendsCommon/test/QuantizeHelper.hpp>
16
David Beckac42efd2018-09-26 17:41:13 +010017#include <test/TensorHelpers.hpp>
telsoa014fcda012018-03-09 14:13:49 +000018
// Two-stage splitter test.
//
// Stage 1 splits a [channels=3, height=6, width=5] input along the channel axis
// into a [1,6,5] view (channel 0 -> ret1) and a [2,6,5] view (channels 1-2 -> ret2).
// Stage 2 splits that [2,6,5] result again into two [1,6,5] views (ret3, ret4).
//
// Returns {ret1, ret2, ret3, ret4}, each with outputExpected populated so the
// caller can compare actual against expected.
//
// qScale/qOffset are applied to every tensor info when T is a quantized type;
// the splitter never dequantizes/quantizes, so the values only need to be consistent.
template<typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
    //       cannot be split.
    //       For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // To take channel 0 of original output
    // and channel 0 and channel 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channel 1 and 2 of the original output.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;


    // Define the tensor descriptors (dimension order is channels, height, width).
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split (each a single channel of the stage-1 result).
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    // Input data: values 1..90 laid out as 3 channels of 6x5, so every split
    // boundary is easy to recognise in the expected tensors below.
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));

    // Channel 1 & 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));

    // Channel 1 of return 2.
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
    //       have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x
    //       note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);

    // When the backend supports subtensors, the stage-1 outputs are views into the input handle...
    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    // ...and the stage-2 outputs are views carved out of the stage-1 subtensor (outputHandle2).
    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);

    // Do the first split
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());

    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    // NOTE(review): the second split goes through the ExecuteWorkload helper (which takes the
    // memory manager) while the first calls Execute() directly — presumably the helper performs
    // backend memory-manager handling that stage 2 needs; confirm the asymmetry is intentional.
    ExecuteWorkload(*workload2, memoryManager);

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};

    return ret;
}
246
247
248template <typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000249LayerTestResult<T, 3> CopyViaSplitterTestImpl(
250 armnn::IWorkloadFactory& workloadFactory,
251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
252 float qScale, int32_t qOffset)
telsoa014fcda012018-03-09 14:13:49 +0000253{
254 const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
255 auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
256 {
257 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
258 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
259 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
260 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
261 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
262 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
263
264 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
265 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
266 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
267 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
268 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
269 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
270
271 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
272 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
273 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
274 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
275 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
276 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
277 }));
278
279 std::vector<unsigned int> origin = { 0, 0, 0 };
280 armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
281
282 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
283
284 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
285
286 std::unique_ptr<armnn::ITensorHandle> outputHandle =
287 subTensorsSupported ?
288 workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
289 workloadFactory.CreateTensorHandle(tensorInfo);
290
291 armnn::SplitterQueueDescriptor data;
292 armnn::WorkloadInfo info;
293 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
294 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
295
296 data.m_ViewOrigins.push_back(window);
297
298 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
299
300 inputHandle->Allocate();
301 outputHandle->Allocate();
302
303 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
304
305 workload->Execute();
306
307 LayerTestResult<T, 3> ret(tensorInfo);
308 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
309 ret.outputExpected = input;
310
311 return ret;
312}