//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5#pragma once
6
7#include <armnn/ArmNN.hpp>
8#include <armnn/Tensor.hpp>
telsoa014fcda012018-03-09 14:13:49 +00009
David Beckac42efd2018-09-26 17:41:13 +010010#include <test/TensorHelpers.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011
David Beckac42efd2018-09-26 17:41:13 +010012#include <backends/CpuTensorHandle.hpp>
13#include <backends/WorkloadFactory.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
David Beckac42efd2018-09-26 17:41:13 +010015#include <backends/test/QuantizeHelper.hpp>
telsoa014fcda012018-03-09 14:13:49 +000016
17
// Exercises the Splitter workload twice in sequence:
//   1) splits a [3, 6, 5] (channels, height, width) input along the channel axis
//      into a 1-channel view (ret1) and a 2-channel view (ret2);
//   2) splits the 2-channel result again into two 1-channel views (ret3, ret4).
// Returns the four results in order {ret1, ret2, ret3, ret4}.
//
// qScale/qOffset are applied to every tensor info only when T is a quantized
// type; the splitter performs no (de)quantization, so the values do not affect
// the expected outputs.
template<typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                     float qScale = 0.0f,
                                                     int32_t qOffset = 0)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
    // cannot be split.
    // For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // To take channel 0 of the original input,
    // and channel 0 and channel 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channel 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;


    // Define the tensor descriptors (dimension order is channels, height, width).
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split (both single-channel, same H x W).
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    // Input: values 1..90 laid out as 3 channels of 6x5.
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));

    // Channel 1 & 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));

    // Channel 1 of return 2.
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
    // have to be zero, the co-ordinates are as per the tensor info above: channels, height/y, width/x.
    // Note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    // Outputs 3 and 4 are sub-tensors of outputHandle2 (not of the input), since
    // the second split consumes the first split's 2-channel output.
    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());

    // Do the second split, feeding the first split's 2-channel output back in.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    workload2->Execute();

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};

    return ret;
}
243
244
245template <typename T>
246LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
247{
248 const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
249 auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
250 {
251 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
252 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
253 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
254 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
255 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
256 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
257
258 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
259 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
260 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
261 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
262 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
263 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
264
265 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
266 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
267 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
268 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
269 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
270 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
271 }));
272
273 std::vector<unsigned int> origin = { 0, 0, 0 };
274 armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
275
276 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
277
278 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
279
280 std::unique_ptr<armnn::ITensorHandle> outputHandle =
281 subTensorsSupported ?
282 workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
283 workloadFactory.CreateTensorHandle(tensorInfo);
284
285 armnn::SplitterQueueDescriptor data;
286 armnn::WorkloadInfo info;
287 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
288 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
289
290 data.m_ViewOrigins.push_back(window);
291
292 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
293
294 inputHandle->Allocate();
295 outputHandle->Allocate();
296
297 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
298
299 workload->Execute();
300
301 LayerTestResult<T, 3> ret(tensorInfo);
302 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
303 ret.outputExpected = input;
304
305 return ret;
306}