//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
#include "TypeUtils.hpp"

#include <Permute.hpp>
#include <DataLayoutIndexed.hpp>

#include <test/TensorHelpers.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <string>

// Mapping from input type to bias type for fully connected layers.
// float => float, uint8_t => int32_t
template<typename T>
struct FullyConnectedBiasTypeForInputType;

template<>
struct FullyConnectedBiasTypeForInputType<float>
{
    using Type = float;
};

template<>
struct FullyConnectedBiasTypeForInputType<uint8_t>
{
    using Type = int32_t;
};

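// A usage sketch for the trait above (illustrative only; BiasType is a
// hypothetical alias, not something defined elsewhere in this file):
//
//     template<typename T>
//     using BiasType = typename FullyConnectedBiasTypeForInputType<T>::Type;
//     static_assert(std::is_same<BiasType<uint8_t>, int32_t>::value, "");
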
// Modifies a std::vector in-place using a specified bias.
template<typename T, typename B>
void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
               const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
{
    BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
                     "Invalid type and parameter combination.");
    BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
                     "Invalid type and parameter combination.");

    // Note we need to dequantize and re-quantize the image value and the bias.
    for (uint32_t i = 0; i < bias.size(); ++i)
    {
        float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
        for (uint32_t y = 0; y < h; ++y)
        {
            for (uint32_t x = 0; x < w; ++x)
            {
                uint32_t offset = (i * h + y) * w + x;
                BOOST_ASSERT(offset < v.size());
                T& outRef = v[offset];
                float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
                outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
            }
        }
    }
}

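// Worked example for ApplyBias (assumed numbers, not taken from any test in
// this file): with vScale = 0.5f, vOffset = 0, bScale = 0.25f and bOffset = 0,
// a stored output value of 6 dequantizes to 3.0f and a stored bias of 4
// dequantizes to 1.0f; their sum, 4.0f, re-quantizes to a stored value of 8.
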
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& originalInput,
    const boost::multi_array<T, 4>& originalKernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& originalOutputExpected,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
    unsigned int inputNum = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);

    unsigned int outputHeight = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
    unsigned int outputNum = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);

    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    BOOST_ASSERT(inputNum == 1);
    BOOST_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc =
        armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale); // Bias scale is input scale * weight scale (both qScale here).
        biasDesc.SetQuantizationOffset(0);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    // Construct input data - two batches of the same input image.
    std::vector<T> inputImage;
    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());

    // At this point, permute the input data into NHWC if that layout is required.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<T> outputImage;
    outputImage.assign(originalOutputExpected.data(),
                       originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output image if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    // Construct expected output data - two identical images.
    std::vector<T> outputData;
    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());

    // Likewise, permute the expected output into NHWC if required.
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
    // Permute the kernel if necessary.
    boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
    }
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    if(biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
    }

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
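
// A minimal call sketch for the helper above (hypothetical tensors; for a
// float run the quantization arguments are simply passed through as 0.0f/0):
//
//     auto result = SimpleConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
//         workloadFactory, memoryManager, input, kernel, bias, expectedOutput,
//         0.0f, 0, armnn::DataLayout::NCHW);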
234
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000235template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
236 typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000237LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
238 armnn::IWorkloadFactory& workloadFactory,
239 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
240 const boost::multi_array<T, 4>& input,
241 const boost::multi_array<T, 4>& kernel,
242 const boost::multi_array<B, 1>& bias,
243 const boost::multi_array<T, 4>& outputExpected,
Mike Kelly7332ed82018-12-20 17:03:06 +0000244 const armnn::DataLayout dataLayout,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000245 float qScale,
246 int32_t qOffset,
247 uint32_t padLeft = 1,
248 uint32_t padTop = 1,
249 uint32_t padRight = 1,
250 uint32_t padBottom = 1,
251 uint32_t strideX = 1,
252 uint32_t strideY = 1)
Francis Murtaghd59116e2018-10-04 16:03:07 +0100253{
254 unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
255 unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
256 unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
257 unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);
258
259 unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
260 unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
261 unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
262 unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
263
264 unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
265 unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
266 unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
267 unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
268
269 bool biasEnabled = bias.size() > 0;
270
271 // Creates the tensors.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000272 armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100273 armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000274 ArmnnType);
275 armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
276 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100277
278 // Construct the input data.
279 std::vector<T> inputData;
280 inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
281 auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
282
283 // Construct the output data, with bias applied, as appropriate.
284 std::vector<T> outputData;
285 outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
286
287 LayerTestResult<T, 4> ret(outputTensorInfo);
288 ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
289
290 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
291 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
292
293 armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
294 AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
295
296 armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
297
298 armnn::Convolution2dQueueDescriptor data;
299
300 data.m_Weight = &weightsTensor;
301 data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
302 data.m_Parameters.m_StrideX = strideX;
303 data.m_Parameters.m_StrideY = strideY;
304 data.m_Parameters.m_PadLeft = padLeft;
305 data.m_Parameters.m_PadRight = padRight;
306 data.m_Parameters.m_PadTop = padTop;
307 data.m_Parameters.m_PadBottom = padBottom;
308 data.m_Parameters.m_BiasEnabled = biasEnabled;
309 data.m_Parameters.m_DataLayout = dataLayout;
310
311 armnn::WorkloadInfo info;
312 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
313 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
314
315 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
316 inputHandle->Allocate();
317 outputHandle->Allocate();
318
319 CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
320
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000321 ExecuteWorkload(*workload, memoryManager);
Francis Murtaghd59116e2018-10-04 16:03:07 +0100322
323 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
324
325 return ret;
326}
327
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& kernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& outputExpected,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[2]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
    unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);

    // If a bias is used, its size must equal the number of output channels.
    bool biasEnabled = bias.size() > 0;
    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);

    // At this point, permute the input data into NHWC if that layout is required.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
    if (biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);

    // Likewise, permute the expected output into NHWC if required.
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
    }

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

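// A note on the depthwise weight layout used above (a reading aid, not new
// behaviour): kernelDesc is ordered {channelMultiplier, channels, height, width},
// so a kernel of shape {2, 3, 5, 3} would describe 2 * 3 = 6 output channels,
// and the bias (when enabled) must supply one value per output channel.
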
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using B = armnn::ResolveType<ArmnnBType>;

    unsigned int inputHeight = 3;
    unsigned int inputWidth = 3;
    unsigned int inputChannels = 2;
    unsigned int inputNum = 1;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;
    unsigned int kernelChannels = inputChannels;
    unsigned int kernelDepthMultiplier = 1;

    unsigned int outputHeight = 1;
    unsigned int outputWidth = 1;
    unsigned int outputChannels = kernelChannels;
    unsigned int outputNum = inputNum;

    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
    armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }
    std::vector<T> inputData = std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            1.f, 2.f, 1.f,
            2.f, 1.f, 2.f,
            1.f, 2.f, 1.f,

            1.f, 2.f, 1.f,
            2.f, 1.f, 2.f,
            1.f, 2.f, 1.f,
        }));
    // At this point, permute the input data into NHWC if that layout is required.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                                            {0, 2}));
    auto bias = MakeTensor<B, 1>(biasDesc, biasV);

    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
            1.f, 0.f, 1.f,
            0.f, 0.f, 0.f,
            -1.f, 0.f, -1.f,

            1.f, 0.f, 1.f,
            0.f, 0.f, 0.f,
            -1.f, 0.f, -1.f,
        }));
    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);

    // Manually calculated.
    std::vector<T> outputImage(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
                           outputTensorInfo.GetQuantizationOffset(),
                           {0.f, 0.f})
    );

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputImage.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
        outputImage = tmp;
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 0;
    data.m_Parameters.m_PadBottom = 0;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

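// Sanity check of the manually calculated result above: each 3x3 kernel has
// corner weights {1, 1, -1, -1} and zeros elsewhere, every input corner is 1,
// and there is no padding, so the single output element per channel is
// 1 + 1 - 1 - 1 = 0.
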
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using B = armnn::ResolveType<ArmnnBType>;

    unsigned int depthMultiplier = 2;

    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 2;
    unsigned int inputBatchSize = 1;

    unsigned int kernelHeight = 5;
    unsigned int kernelWidth = 3;

    unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
    unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
    unsigned int outputChannels = inputChannels * depthMultiplier;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth}, ArmnnType);
    armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // NOTE: originalInputData is in NCHW format.
    std::vector<T> originalInputData = std::vector<T>(
        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        }));
    std::vector<T> inputData = originalInputData;
    // At this point, permute the input data into NHWC if that layout is required.
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            originalInputData.data(), inputData.data(), sizeof(T));
    }
    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                                            {0, 2, 1, -1}));
    auto bias = MakeTensor<B, 1>(biasDesc, biasV);

    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            0, 0, 0,
            0, -1, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            0, 0, 0,
            0, 0, 0,
            0, 1, 0,
            0, 0, 0,
            0, 0, 0
        }));
    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);

    // Manually calculated.
    std::vector<T> originalOutputImage = std::vector<T>(
        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
            3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
            6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
            5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
            6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f,
            6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f,
            5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,

            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,

            8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,

            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
        }));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(originalOutputImage,
                  outputTensorInfo.GetQuantizationScale(),
                  outputTensorInfo.GetQuantizationOffset(),
                  biasV,
                  biasDesc.GetQuantizationScale(),
                  biasDesc.GetQuantizationOffset(),
                  outputWidth,
                  outputHeight);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    std::vector<T> outputImage = originalOutputImage;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
                            originalOutputImage.data(), outputImage.data(), sizeof(T));
    }

    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
    data.m_Parameters.m_StrideX = 2;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 1;
    data.m_Parameters.m_PadBottom = 1;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

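// Dimension check for the test above: with a 16x8 input, a 3x5 kernel,
// strideX = 2, strideY = 1 and one pixel of top/bottom padding,
// outputWidth = (16 - 3 + 1) / 2 = 7 and outputHeight = 8 - 5 + 1 + 2 = 6,
// which matches the 7x6 planes in originalOutputImage.
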
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& kernel,
    const boost::multi_array<B, 1>& bias,
    const boost::multi_array<T, 4>& outputExpected,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);

    unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]);

    unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, ArmnnType);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);

    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    using B = armnn::ResolveType<ArmnnBType>;
    // Until we have a specialist 1D convolution layer, we can fake one using
    // 2D convolution with the final dimension set to 1.
    // I don't anticipate this being particularly slow, given that convolution is implemented
    // as a matrix multiplication, at which point dimension doesn't matter.

    unsigned int batchSize = 1;
    unsigned int inputChannels = 2;
    unsigned int outputChannels = 3;
    unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
    unsigned int kernelSize = 3;
    unsigned int padSize = 2;
    unsigned int stride = 1;
    unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.

    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
    armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        kernelInfo.SetQuantizationScale(qScale);
        kernelInfo.SetQuantizationOffset(qOffset);
        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
        biasInfo.SetQuantizationOffset(0);
    }

    std::vector<T> inputData(
        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), {
            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
        }));

    std::vector<T> kernelData(
        QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), {
            1.0f, 0.0f, 0.0f,
            0.0f, 2.0f, -1.5f,

            0.0f, 0.0f, 0.0f,
            0.2f, 0.2f, 0.2f,

            0.5f, 0.0f, 0.5f,
            0.0f, -1.0f, 0.0f
        }));

    std::vector<B> biasData(
        QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), {
            1.0f, 0.0f, 0.0f
        }));

    std::vector<T> outputData(
        QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), {
            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
        }));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
                  biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
                  1, outputSize);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelInfo);
    armnn::ScopedCpuTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = stride;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = padSize;
    data.m_Parameters.m_PadBottom = padSize;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // Output
    LayerTestResult<T,4> ret(outputInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    ret.outputExpected = MakeTensor<T, 4>(outputInfo, outputData);
    return ret;
}

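// Shape sketch for the 1D-as-2D trick above (values mirror the test, nothing
// new): the length-5 input becomes a {1, 2, 5, 1} NCHW tensor, the length-3
// kernel becomes {3, 2, 3, 1}, and padding and stride are applied along the
// height dimension only, yielding a {1, 3, 7, 1} output.
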
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001034template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001035LayerTestResult<T,4> CompareConvolution2dTestImpl(
1036 armnn::IWorkloadFactory& workloadFactory,
1037 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1038 armnn::IWorkloadFactory& refWorkloadFactory)
telsoa014fcda012018-03-09 14:13:49 +00001039{
1040 unsigned int inputHeight = 8;
1041 unsigned int inputWidth = 16;
1042 unsigned int inputChannels = 3;
1043 unsigned int inputNum = 5;
1044
1045 unsigned int kernelHeight = 3;
1046 unsigned int kernelWidth = 3;
1047
1048 unsigned int strideX = 2;
1049 unsigned int strideY = 3;
1050 unsigned int padX = 1;
1051 unsigned int padY = 1;
1052
1053 unsigned int outputNum = inputNum;
1054 unsigned int outputChannels = 2;
1055 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
1056 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
1057
1058 armnn::TensorInfo inputTensorInfo;
1059 armnn::TensorInfo outputTensorInfo;
1060 armnn::TensorInfo kernelDesc;
1061 armnn::TensorInfo biasDesc;
1062
Matteo Martincigh747ef822018-12-18 09:26:39 +00001063 unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
1064 unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
1065 unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
1066 unsigned int biasShape[] = {outputChannels};
telsoa014fcda012018-03-09 14:13:49 +00001067
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001068 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
1069 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
1070 kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
1071 biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001072
1073 LayerTestResult<T,4> ret(outputTensorInfo);
1074
    auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
    auto bias = MakeRandomTensor<T, 1>(biasDesc, 1028);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;

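    // The reference run needs its own tensor handles, created by the reference
    // factory, since handles are backend-specific.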
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::Convolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

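    // Build one workload per backend from descriptors that are identical except
    // for the tensor handles they point at.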
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

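// Depthwise variant of the comparison test above. In addition to running on both
// the factory under test and the reference factory, it is parameterised on the
// data layout (NCHW or NHWC) so both tensor orderings get coverage.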
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnnUtils::DataLayoutIndexed& layout)
{
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

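    // Depthwise convolution applies channelMultiplier filters to each input channel
    // independently, so outputChannels = inputChannels * channelMultiplier below.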
    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;
    unsigned int channelMultiplier = 1;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

    unsigned int outputNum = inputNum;
    unsigned int outputChannels = inputChannels * channelMultiplier;
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

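    // The depthwise weight and bias shapes are layout-independent; only the input
    // and output shapes depend on whether the data layout is NCHW or NHWC.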
    std::vector<unsigned int> inputShape;
    std::vector<unsigned int> outputShape;
    std::vector<unsigned int> kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth };
    std::vector<unsigned int> biasShape{ outputChannels };
    switch (layout.GetDataLayout())
    {
        case armnn::DataLayout::NCHW:
            inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
            outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
            break;
        case armnn::DataLayout::NHWC:
            inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
            outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
            break;
        default:
            throw armnn::InvalidArgumentException("unknown data layout ["
                + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
    }

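    // Scales only matter for quantized types; differing input and output scales
    // make the backend's requantization step part of what is being compared.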
    float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
    float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
    int32_t qOffset = 0;

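    // GetBiasDataType maps the input data type to the matching bias type
    // (e.g. Signed32 for quantized inputs), so the bias descriptor is built from it.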
    inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
    outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
    biasDesc = armnn::TensorInfo(
        1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);

    LayerTestResult<T, 4> ret(outputTensorInfo);

    auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f);
    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
    auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
        biasDesc, 1028, 0.0f, 255.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;
    data.m_Parameters.m_DataLayout = layout.GetDataLayout();

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

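    // Execute on the backend under test via ExecuteWorkload (which also drives the
    // memory manager, when one is supplied), then on the reference backend.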
    ExecuteWorkload(*workload, memoryManager);

    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}