telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 1 | // |
| 2 | // Copyright © 2017 Arm Ltd. All rights reserved. |
David Beck | ecb56cd | 2018-09-05 12:52:57 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
telsoa01 | 4fcda01 | 2018-03-09 14:13:49 +0000 | [diff] [blame] | 4 | // |
| 5 | #pragma once |
| 6 | |
| 7 | #include <armnn/ArmNN.hpp> |
| 8 | #include <armnn/Tensor.hpp> |
| 9 | #include <backends/WorkloadInfo.hpp> |
| 10 | |
| 11 | #include "test/TensorHelpers.hpp" |
| 12 | |
| 13 | #include "backends/CpuTensorHandle.hpp" |
| 14 | #include "backends/WorkloadFactory.hpp" |
| 15 | |
| 16 | #include "backends/test/QuantizeHelper.hpp" |
| 17 | |
| 18 | |
//
// Exercises the Splitter workload via two chained splits:
//   1. The [3, 6, 5] input is split along the channel dimension into a
//      1-channel view (channel 0) and a 2-channel view (channels 1-2).
//   2. The 2-channel result is split again into two 1-channel views.
// Returns the four results in order: {channel 0, channels 1-2, channel 1, channel 2}.
// qScale/qOffset are applied to every tensor info when T is a quantized type.
template<typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                     float qScale = 0.0f,
                                                     int32_t qOffset = 0)
{
    // Input dimensions, laid out as [channels, height, width].
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
    // cannot be split.
    // For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // Shape shared by the three single-channel outputs: channel 0 of the original
    // input, and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // Shape of the two-channel output taking channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;


    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    // Input data: values 1..90 as three 6x5 channels (channel 0: 1-30, channel 1: 31-60, channel 2: 61-90).
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));

    // Channel 1 & 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of return 2 (i.e. channel 1 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));

    // Channel 1 of return 2 (i.e. channel 2 of the original input).
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
    // have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x
    // note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
    // wOrigin3/wOrigin4 are relative to the 2-channel subtensor, not the original input.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);

    // Outputs of the first split are sub-tensors of the input when supported.
    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    // Outputs of the second split are sub-tensors of the first split's 2-channel
    // output (outputHandle2), not of the original input.
    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());

    // Do the second split: its input is the first split's 2-channel output, so it
    // must be created and executed after the first workload has run.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    workload2->Execute();

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};

    return ret;
}
| 244 | |
| 245 | |
| 246 | template <typename T> |
| 247 | LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset) |
| 248 | { |
| 249 | const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>()); |
| 250 | auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset, |
| 251 | { |
| 252 | 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, |
| 253 | 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, |
| 254 | 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, |
| 255 | 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, |
| 256 | 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, |
| 257 | 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, |
| 258 | |
| 259 | 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, |
| 260 | 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, |
| 261 | 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, |
| 262 | 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, |
| 263 | 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, |
| 264 | 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, |
| 265 | |
| 266 | 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, |
| 267 | 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, |
| 268 | 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, |
| 269 | 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, |
| 270 | 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, |
| 271 | 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, |
| 272 | })); |
| 273 | |
| 274 | std::vector<unsigned int> origin = { 0, 0, 0 }; |
| 275 | armnn::SplitterQueueDescriptor::ViewOrigin window(origin); |
| 276 | |
| 277 | const bool subTensorsSupported = workloadFactory.SupportsSubTensors(); |
| 278 | |
| 279 | std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo); |
| 280 | |
| 281 | std::unique_ptr<armnn::ITensorHandle> outputHandle = |
| 282 | subTensorsSupported ? |
| 283 | workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) : |
| 284 | workloadFactory.CreateTensorHandle(tensorInfo); |
| 285 | |
| 286 | armnn::SplitterQueueDescriptor data; |
| 287 | armnn::WorkloadInfo info; |
| 288 | AddInputToWorkload(data, info, tensorInfo, inputHandle.get()); |
| 289 | AddOutputToWorkload(data, info, tensorInfo, outputHandle.get()); |
| 290 | |
| 291 | data.m_ViewOrigins.push_back(window); |
| 292 | |
| 293 | std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info); |
| 294 | |
| 295 | inputHandle->Allocate(); |
| 296 | outputHandle->Allocate(); |
| 297 | |
| 298 | CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]); |
| 299 | |
| 300 | workload->Execute(); |
| 301 | |
| 302 | LayerTestResult<T, 3> ret(tensorInfo); |
| 303 | CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get()); |
| 304 | ret.outputExpected = input; |
| 305 | |
| 306 | return ret; |
| 307 | } |