//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"

#include <Permute.hpp>
#include <DataLayoutIndexed.hpp>

#include <test/TensorHelpers.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <string>

// Mapping from input type to bias type for fully connected layers.
// float => float, uint8_t => int32_t
template<typename T>
struct FullyConnectedBiasTypeForInputType;

template<>
struct FullyConnectedBiasTypeForInputType<float>
{
    using Type = float;
};

template<>
struct FullyConnectedBiasTypeForInputType<uint8_t>
{
    using Type = int32_t;
};

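// A compile-time sketch of the mapping (illustrative only, assuming <type_traits> is
// available; these asserts are not part of the original helpers):
//   static_assert(std::is_same<FullyConnectedBiasTypeForInputType<float>::Type, float>::value, "");
//   static_assert(std::is_same<FullyConnectedBiasTypeForInputType<uint8_t>::Type, int32_t>::value, "");
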
// Modifies a std::vector in-place using a specified bias.
template<typename T, typename B>
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
               const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
    BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
                     "Invalid type and parameter combination.");
    BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
                     "Invalid type and parameter combination.");

    // Note we need to dequantize and re-quantize the image value and the bias.
    for (uint32_t i = 0; i < bias.size(); ++i)
    {
        float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
        for (uint32_t y = 0; y < h; ++y)
        {
            for (uint32_t x = 0; x < w; ++x)
            {
                uint32_t offset = (i * h + y) * w + x;
                BOOST_ASSERT(offset < v.size());
                T& outRef = v[offset];
                float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
                outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
            }
        }
    }
}

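// Worked example of the quantized path above (hypothetical numbers, not taken from any
// test): with vScale = 0.1 and vOffset = 0, a stored value of 50 dequantizes to 5.0;
// adding a bias that dequantizes to 1.5 gives 6.5, which re-quantizes to
// round(6.5 / 0.1) + 0 = 65.
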
template<typename T, typename B>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& originalInput,
    const boost::multi_array<T, 4>& originalKernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& originalOutputExpected,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
    unsigned int inputNum      = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);

    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
    unsigned int outputNum      = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);

    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    BOOST_ASSERT(inputNum == 1);
    BOOST_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
    armnn::TensorInfo kernelDesc =
        armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
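        // The bias scale follows the usual affine-quantization convention:
        // bias scale = input scale * weight scale (both equal to qScale here), with zero offset.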
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    // Construct input data - two batches of the same input image.
    std::vector<T> inputImage;
    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());

    // At this point, if we require it, permute the input data.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
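    // In an armnn::PermutationVector, element i gives the destination index of source
    // dimension i, so { 0, 3, 1, 2 } maps N->0, C->3, H->1, W->2, i.e. NCHW -> NHWC.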
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<T> outputImage;
    outputImage.assign(originalOutputExpected.data(),
                       originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output image if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
            biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
            outputWidth, outputHeight);
    }

    // Construct expected output data - two identical images.
    std::vector<T> outputData;
    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());

    // At this point, if we require it, permute the expected output.
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    // Permute the kernel if necessary.
    boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
    }
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    if(biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
    }

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

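// A minimal usage sketch for the helper above (hypothetical values; the real call sites
// live in the backend LayerTests). For float, qScale/qOffset are ignored, and an empty
// bias disables the bias path:
//
//   armnn::TensorInfo info({1, 1, 3, 3}, armnn::DataType::Float32);
//   armnn::TensorInfo kernelInfo({1, 1, 1, 1}, armnn::DataType::Float32);
//   auto input    = MakeTensor<float, 4>(info, {1, 2, 3, 4, 5, 6, 7, 8, 9});
//   auto kernel   = MakeTensor<float, 4>(kernelInfo, {2}); // 1x1 kernel doubles each value
//   auto expected = MakeTensor<float, 4>(info, {2, 4, 6, 8, 10, 12, 14, 16, 18});
//   boost::multi_array<float, 1> noBias(boost::extents[0]);
//   LayerTestResult<float, 4> result = SimpleConvolution2dTestImpl<float, float>(
//       workloadFactory, memoryManager, input, kernel, noBias, expected, 0.5f, 50);
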
template<typename T, typename B>
LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& kernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& outputExpected,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 1,
    uint32_t padTop = 1,
    uint32_t padRight = 1,
    uint32_t padBottom = 1,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputNum      = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(input.shape()[2]);

    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[2]);

    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);

    bool biasEnabled = bias.size() > 0;

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
                                       armnn::GetDataType<T>());
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    armnn::Convolution2dQueueDescriptor data;

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<typename T, typename B>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& kernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& outputExpected,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[2]);
    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);

    // If a bias is used, its size must equal the number of output channels.
    bool biasEnabled = bias.size() > 0;
    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);

    // At this point, if we require it, permute the input data.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
    if (biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
            biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
            outputWidth, outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    // At this point, if we require it, permute the expected output.
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
    }

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<typename T, typename B>
LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    unsigned int inputHeight = 3;
    unsigned int inputWidth = 3;
    unsigned int inputChannels = 2;
    unsigned int inputNum = 1;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;
    unsigned int kernelChannels = inputChannels;
    unsigned int kernelDepthMultiplier = 1;

    unsigned int outputHeight = 1;
    unsigned int outputWidth = 1;
    unsigned int outputChannels = kernelChannels;
    unsigned int outputNum = inputNum;

    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
    armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth},
                                 armnn::GetDataType<T>());
    armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }
    std::vector<T> inputData = std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            1.f, 2.f, 1.f,
            2.f, 1.f, 2.f,
            1.f, 2.f, 1.f,

            1.f, 2.f, 1.f,
            2.f, 1.f, 2.f,
            1.f, 2.f, 1.f,
        }));
    // At this point, if we require it, permute the input data.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                                            {0, 2}));
    auto bias = MakeTensor<B, 1>(biasDesc, biasV);

    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
             1.f, 0.f,  1.f,
             0.f, 0.f,  0.f,
            -1.f, 0.f, -1.f,

             1.f, 0.f,  1.f,
             0.f, 0.f,  0.f,
            -1.f, 0.f, -1.f,
        }));
    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);

    // Manually calculated.
    std::vector<T> outputImage(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                           outputTensorInfo.GetQuantizationOffset(),
                           {0.f, 0.f})
    );

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
            biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
            outputWidth, outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputImage.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
        outputImage = tmp;
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 0;
    data.m_Parameters.m_PadBottom = 0;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<typename T, typename B>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    unsigned int depthMultiplier = 2;
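    // With a depth multiplier of M, depthwise convolution applies M filters to each
    // input channel, so outputChannels = inputChannels * M (2 * 2 = 4 below) and the
    // kernel tensor is laid out as [M, C, kernelHeight, kernelWidth].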

    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 2;
    unsigned int inputBatchSize = 1;

    unsigned int kernelHeight = 5;
    unsigned int kernelWidth = 3;

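    // Output spatial dims follow the usual convolution formula,
    // out = (in + padTop + padBottom - kernel) / stride + 1:
    // height = (8 + 1 + 1 - 5) / 1 + 1 = 6, width = (16 + 0 + 0 - 3) / 2 + 1 = 7.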
    unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
    unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
    unsigned int outputChannels = inputChannels * depthMultiplier;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
        inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
        outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
    armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth},
                                 armnn::GetDataType<T>());
    armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // NOTE: originalInputData is in NCHW format.
    std::vector<T> originalInputData = std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        }));
    std::vector<T> inputData = originalInputData;
    // At this point, if we require it, permute the input data.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            originalInputData.data(), inputData.data(), sizeof(T));
    }
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                                            {0, 2, 1, -1}));
    auto bias = MakeTensor<B, 1>(biasDesc, biasV);

    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
            1,  1, 1,
            1, -1, 1,
            1,  1, 1,
            1,  1, 1,
            1,  1, 1,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            0,  0, 0,
            0, -1, 0,
            0,  0, 0,
            0,  0, 0,
            0,  0, 0,

            0, 0, 0,
            0, 0, 0,
            0, 1, 0,
            0, 0, 0,
            0, 0, 0
        }));
    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);

    // Manually calculated.
    std::vector<T> originalOutputImage = std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
             3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
             6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,

            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,

             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
            10.0f, 10.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
            10.0f, 10.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
            10.0f, 10.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
            10.0f, 10.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,

             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
        }));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(originalOutputImage,
                  outputTensorInfo.GetQuantizationScale(),
                  outputTensorInfo.GetQuantizationOffset(),
                  biasV,
                  biasDesc.GetQuantizationScale(),
                  biasDesc.GetQuantizationOffset(),
                  outputWidth,
                  outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    std::vector<T> outputImage = originalOutputImage;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
                            originalOutputImage.data(), outputImage.data(), sizeof(T));
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
    data.m_Parameters.m_StrideX = 2;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 1;
    data.m_Parameters.m_PadBottom = 1;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<typename T, typename B>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& kernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& outputExpected,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputNum      = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(input.shape()[2]);

    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[3]);

    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
                                       armnn::GetDataType<T>());
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<typename T>
LayerTestResult<T,4> Convolution1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    using B = typename FullyConnectedBiasTypeForInputType<T>::Type;

    // Until we have a specialist 1D convolution layer, we can fake one using
    // 2D convolution with the final dimension set to 1.
    // I don't anticipate this being particularly slow, given that convolution is implemented
    // as a matrix multiplication, at which point dimension doesn't matter.
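    // A sketch of the trick (mirroring the tensors built below): a [N, C, L] signal is
    // treated as a [N, C, L, 1] image, and a length-k kernel becomes a k x 1 kernel, so
    // an ordinary Convolution2d sweeps only the 'height' dimension.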

    unsigned int batchSize = 1;
    unsigned int inputChannels = 2;
    unsigned int outputChannels = 3;
    unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
    unsigned int kernelSize = 3;
    unsigned int padSize = 2;
    unsigned int stride = 1;
    unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.

    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType<T>());
    armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType<B>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        kernelInfo.SetQuantizationScale(qScale);
        kernelInfo.SetQuantizationOffset(qOffset);
        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
        biasInfo.SetQuantizationOffset(0);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), {
             5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
            -3.0f,  3.2f, 5.0f, 2.0f, 3.0f,
        }));

    std::vector<T> kernelData(
        QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), {
            1.0f, 0.0f, 0.0f,
            0.0f, 2.0f, -1.5f,

            0.0f, 0.0f, 0.0f,
            0.2f, 0.2f, 0.2f,

            0.5f, 0.0f, 0.5f,
            0.0f, -1.0f, 0.0f
        }));

    std::vector<B> biasData(
        QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), {
            1.0f, 0.0f, 0.0f
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), {
            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
        }));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
            biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
            1, outputSize);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
    armnn::ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = stride;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = padSize;
    data.m_Parameters.m_PadBottom = padSize;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // Output
    LayerTestResult<T,4> ret(outputInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    ret.outputExpected = MakeTensor<T, 4>(outputInfo, outputData);
    return ret;
}

template<typename T>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

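    // With integer division, (in + 2 * pad - kernel + stride) / stride equals
    // floor((in + 2 * pad - kernel) / stride) + 1, the usual convolution output size:
    // height = (8 + 2 - 3 + 3) / 3 = 3, width = (16 + 2 - 3 + 2) / 2 = 8.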
    unsigned int outputNum = inputNum;
    unsigned int outputChannels = 2;
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
    unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
    unsigned int biasShape[]   = {outputChannels};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
    kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>());
    biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());

    LayerTestResult<T,4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
    auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::Convolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

template<typename T>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnnUtils::DataLayoutIndexed& layout)
{
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;
    unsigned int channelMultiplier = 1;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

    unsigned int outputNum = inputNum;
    unsigned int outputChannels = inputChannels * channelMultiplier;
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

    std::vector<unsigned int> inputShape;
    std::vector<unsigned int> outputShape;
    std::vector<unsigned int> kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth };
    std::vector<unsigned int> biasShape{ outputChannels };
    switch (layout.GetDataLayout())
    {
        case armnn::DataLayout::NCHW:
            inputShape  = { inputNum, inputChannels, inputHeight, inputWidth };
            outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
            break;
        case armnn::DataLayout::NHWC:
            inputShape  = { inputNum, inputHeight, inputWidth, inputChannels };
            outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
            break;
        default:
            throw armnn::InvalidArgumentException("unknown data layout ["
                + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
    }

    float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
    float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
    int32_t qOffset = 0;

    inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
    outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), armnn::GetDataType<T>(), outputQScale, qOffset);
    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
    biasDesc = armnn::TensorInfo(
        1, biasShape.data(), armnn::GetBiasDataType(armnn::GetDataType<T>()), inputsQScale, qOffset);

    LayerTestResult<T, 4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f);
    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
    auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
        biasDesc, 1028, 0.0f, 255.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;
    data.m_Parameters.m_DataLayout = layout.GetDataLayout();

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}