blob: bd7cc40f271a5b72a04db11e1140989d01e79476 [file] [log] [blame]
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "Conv2dTestImpl.hpp"
7
Colm Donelanc42a9872022-02-02 16:35:09 +00008#include <armnnUtils/QuantizeHelper.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +00009#include <armnnUtils/TensorUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010010
Jan Eilers8eb25602020-03-09 12:13:48 +000011#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan171214c2020-09-09 09:07:37 +010012#include <armnn/utility/NumericCast.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000013#include <armnnUtils/DataLayoutIndexed.hpp>
14#include <armnnUtils/Permute.hpp>
15
Colm Donelan0c479742021-12-10 12:43:54 +000016#include <armnn/backends/TensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010017
Sadik Armagana097d2a2021-11-24 15:47:28 +000018#include <armnnTestUtils/DataLayoutUtils.hpp>
19#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000020#include <armnnTestUtils/WorkloadTestUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010021
Colm Donelanc42a9872022-02-02 16:35:09 +000022#include <armnnTestUtils/TensorHelpers.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010023
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010024#include <string>
25
26//
27// Static data
28//
29
// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// 4-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias4({1, 2, 3, 4});

// 8-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4});

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Layout is CHW: 3 channels of 8 rows x 16 columns each.
static std::vector<float> ConvInput3x8x16({
    // Channel 0: 0.5 everywhere, except an all-zero second row.
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    // Channel 1: all zeros with a vertical line of ones in the third column.
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    // Channel 2: constant -1 everywhere.
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
64
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010065using namespace armnnUtils;
66
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010067//
68// Helper templates
69//
70
71// Helper template that returns either Bias2 or an empty vector depending on whether bias is enabled.
72template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010073std::vector<T> GetBias2(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010074{
75 if(biasEnabled)
76 {
Sadik Armagan483c8112021-06-01 09:24:52 +010077 return QuantizedVector<T>(Bias2, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010078 }
79 else
80 {
Sadik Armagan483c8112021-06-01 09:24:52 +010081 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010082 }
83}
84
85// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
86template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010087std::vector<T> GetBias4(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010088{
89 if(biasEnabled)
90 {
Sadik Armagan483c8112021-06-01 09:24:52 +010091 return QuantizedVector<T>(Bias4, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010092 }
93 else
94 {
Sadik Armagan483c8112021-06-01 09:24:52 +010095 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010096 }
97}
98
99// Helper template that returns either Bias8 or an empty vector depending on whether bias is enabled.
100template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100101std::vector<T> GetBias8(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100102{
103 if(biasEnabled)
104 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100105 return QuantizedVector<T>(Bias8, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100106 }
107 else
108 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100109 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100110 }
111}
112
113// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
114template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100115std::vector<T> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100116{
117 const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
118 const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
119 const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex];
120
121 switch (outputChannels)
122 {
123 case 2:
124 default:
125 {
126 return GetBias2<ArmnnType>(biasEnabled, qScale);
127 }
128 case 4:
129 {
130 return GetBias4<ArmnnType>(biasEnabled, qScale);
131 }
132 case 8:
133 {
134 return GetBias8<ArmnnType>(biasEnabled, qScale);
135 }
136 }
137}
138
139//
140// Implementation templates
141//
142
// Mapping from input type to bias type for fully connected layers.
// float => float, uint8_t => int32_t
template<typename T>
struct FullyConnectedBiasTypeForInputType;

// Floating-point inputs use a floating-point bias.
template<>
struct FullyConnectedBiasTypeForInputType<float> { using Type = float; };

// Quantized 8-bit inputs use a 32-bit integer bias.
template<>
struct FullyConnectedBiasTypeForInputType<uint8_t> { using Type = int32_t; };
159
160// Modifies a std::vector in-place using a specified bias.
161template<typename T, typename B>
162void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
163 const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
164{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100165 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100166 "Invalid type and parameter combination.");
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100167 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100168 "Invalid type and parameter combination.");
169
170 // Note we need to dequantize and re-quantize the image value and the bias.
171 for (uint32_t i = 0; i < bias.size(); ++i)
172 {
173 float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
174 for (uint32_t y = 0; y < h; ++y)
175 {
176 for (uint32_t x = 0; x < w; ++x)
177 {
178 uint32_t offset = (i * h + y) * w + x;
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100179 ARMNN_ASSERT(offset < v.size());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100180 T& outRef = v[offset];
181 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
182 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
183 }
184 }
185 }
186}
187
188//
189// Convolution2d implementations
190//
191
// Runs a Convolution2d workload against reference data and returns the
// actual-vs-expected result for the caller to compare.
//
// The reference input/kernel/expected-output are supplied for a SINGLE batch in
// NCHW order; this helper duplicates the batch into two identical batches and,
// when layout == NHWC, permutes input, kernel and expected output accordingly.
// If 'bias' is non-empty it is applied to the expected output via ApplyBias and
// uploaded to the workload.
//
// Template parameters:
//   ArmnnType/T  - element type of input, kernel and output tensors.
//   ArmnnBType/B - element type of the optional bias tensor.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,
    const std::vector<T>& originalKernel,
    const std::vector<B>& bias,                    // Empty => bias disabled.
    const std::vector<T>& originalOutputExpected,  // Expected output WITHOUT bias.
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    armnn::IgnoreUnused(memoryManager);
    // All shape arguments are interpreted as NCHW ([N, C, H, W]).
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc =
        armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale (both qScale here); offset 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct input data - two batches of the same input image.
    std::vector<T> inputImage;
    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    // One batch's worth of expected output (bias still to be applied below).
    std::vector<T> outputImage;
    outputImage.assign(originalOutputExpected.data(),
                       originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output image if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    // Data will be copied from outputHandle
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data - two identical images.
    std::vector<T> expectedOutput;
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(expectedOutput.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T));
        expectedOutput = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    // Permute the kernel if necessary
    std::vector<T> kernel = originalKernel;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
    }
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());

    if(biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
    }

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // Retrieve the computed output for comparison against expectedOutput.
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
364
// Runs a Convolution2d workload with NHWC-ordered reference data and returns
// the actual-vs-expected result. Unlike SimpleConvolution2dTestImpl, shapes and
// data are already in the requested data layout ([N, H, W, C]) and only a
// single batch is used, with no duplication or permutation.
//
// Template parameters:
//   ArmnnType/T  - element type of input and kernel tensors.
//   ArmnnBType/B - element type of the bias tensor descriptor.
//   OutType/O    - element type of the output tensor (defaults to ArmnnType).
//
// NOTE(review): although a non-empty 'bias' sets m_BiasEnabled and sizes
// biasDesc, the bias data is never copied into biasTensor and never applied to
// the expected output here — callers appear to pass an empty bias; confirm
// before enabling bias through this helper.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>,
         armnn::DataType OutType = ArmnnType, typename O = armnn::ResolveType<OutType>>
LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& kernel,
    const std::vector<B>& bias,
    const std::vector<O>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 1,
    uint32_t padTop = 1,
    uint32_t padRight = 1,
    uint32_t padBottom = 1,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    // Quantization parameters are accepted for signature consistency but are
    // not applied to the tensor infos in this NHWC variant.
    armnn::IgnoreUnused(qScale, qOffset);
    // All shape arguments are interpreted as NHWC ([N, H, W, C]).
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[2]);

    unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);

    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);

    bool biasEnabled = bias.size() > 0;

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
                                       OutType);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<O> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    // Data will be copied from outputHandle after execution.
    std::vector<O> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());

    // NOTE(review): biasTensor is allocated from biasDesc but no bias data is
    // ever copied into it (compare SimpleConvolution2dTestImpl) — verify.
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    armnn::Convolution2dQueueDescriptor data;

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // Retrieve the computed output for comparison against outputData.
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<O, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
466
// Tests a 1D convolution (2 input channels, 3 output channels, size-3 kernel,
// padding 2) expressed as a Convolution2d workload whose width dimension is 1.
// Reference input/kernel/expected-output values are hard-coded below; bias is
// optionally applied to the expected output and uploaded to the workload.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    using B = armnn::ResolveType<ArmnnBType>;
    // Until we have a specialist 1D convolution layer, we can fake one using
    // 2D convolution with the final dimension set to 1.
    // I don't anticipate this being particularly slow, given that convolution is implemented
    // as a matrix multiplication, at which point dimension doesn't matter.

    unsigned int batchSize = 1;
    unsigned int inputChannels = 2;
    unsigned int outputChannels = 3;
    unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
    unsigned int kernelSize = 3;
    unsigned int padSize = 2;
    unsigned int stride = 1;
    unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.

    // NCHW tensors with W == 1, so the 'height' axis carries the 1D data.
    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
    armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        kernelInfo.SetQuantizationScale(qScale);
        kernelInfo.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * kernel scale; offset 0.
        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
        biasInfo.SetQuantizationOffset(0);
    }

    // 2 channels x 5 elements.
    std::vector<T> inputData = QuantizedVector<T>(
        {
            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
        },
        inputInfo.GetQuantizationScale(),
        inputInfo.GetQuantizationOffset());

    // 3 output channels x 2 input channels x 3 taps.
    std::vector<T> kernelData = QuantizedVector<T>(
        {
            1.0f, 0.0f, 0.0f,
            0.0f, 2.0f, -1.5f,

            0.0f, 0.0f, 0.0f,
            0.2f, 0.2f, 0.2f,

            0.5f, 0.0f, 0.5f,
            0.0f, -1.0f, 0.0f
        },
        kernelInfo.GetQuantizationScale(),
        kernelInfo.GetQuantizationOffset());

    // One bias value per output channel.
    std::vector<B> biasData =
        QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());

    // Hand-computed expected output (without bias); each row is one output
    // channel of 7 elements, written as the sums of the contributing taps.
    std::vector<T> outputData = QuantizedVector<T>(
        {
            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
        },
        outputInfo.GetQuantizationScale(),
        outputInfo.GetQuantizationOffset());

    // Data will be copied from outputHandle after execution.
    std::vector<T> actualOutput(outputInfo.GetNumElements());

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
                  biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
                  1, outputSize);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(kernelInfo);
    armnn::ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    // The 1D data runs along Y, so only the Y stride/padding are meaningful.
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = stride;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = padSize;
    data.m_Parameters.m_PadBottom = padSize;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    // Retrieve the computed output for comparison against outputData.
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputInfo.GetShape());
}
594
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Runs a simple 3x3 convolution over a small NHWC input and compares
    // against a precomputed result. No bias is applied: an empty bias vector
    // is passed to the impl below, so biasEnabled is deliberately ignored.
    armnn::IgnoreUnused(biasEnabled);

    // Input: one batch, one channel, 3x4 image (NHWC { 1, 3, 4, 1 }).
    armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType);
    std::vector<T> input =
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    };

    // Kernel: a single 3x3 single-channel filter (NHWC { 1, 3, 3, 1 }).
    armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
    std::vector<T> kernel =
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    };

    // Expected output: one batch, one channel, 3x4 — same spatial size as the
    // input (padding/stride come from SimpleConvolution2dNhwcTestImpl's
    // defaults, which are not visible in this block).
    armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType);
    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        std::vector<T>(), // No bias.
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset);
}
649
650template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
651LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
652 armnn::IWorkloadFactory& workloadFactory,
653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100654 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100655 float qScale,
656 int32_t qOffset,
657 bool biasEnabled,
658 const armnn::DataLayout& dataLayout)
659{
Jan Eilers8eb25602020-03-09 12:13:48 +0000660 armnn::IgnoreUnused(biasEnabled);
Derek Lambertic374ff02019-12-10 21:57:35 +0000661
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100662 // Input is a single-batch, 1 channel, 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100663 armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType);
664 std::vector<T> input =
665 {
666 1, 5, 2, 3, 5,
667 8, 7, 3, 6, 3,
668 3, 3, 9, 1, 9,
669 4, 1, 8, 1, 3,
670 6, 8, 1, 9, 2
671 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100672
673 // Use a 3x3 kernel.
Sadik Armagan483c8112021-06-01 09:24:52 +0100674 armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
675 std::vector<T> kernel =
676 {
677 4, 5, 6,
678 0, 0, 0,
679 3, 2, 1
680 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100681
682 // Expected output is a single-batch, 1 channel, 3x3 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100683 armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType);
684 std::vector<T> outputData =
685 {
686 23, 33, 24,
687 91, 99, 48,
688 26, 50, 19
689 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100690
691 uint32_t padLeft = 1;
692 uint32_t padTop = 1;
693 uint32_t padRight = 1;
694 uint32_t padBottom = 1;
695 uint32_t strideX = 2;
696 uint32_t strideY = 2;
697
698 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
699 workloadFactory,
700 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100701 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100702 input,
703 kernel,
Sadik Armagan483c8112021-06-01 09:24:52 +0100704 std::vector<T>(),
705 outputData,
706 inputDesc.GetShape(),
707 kernelDesc.GetShape(),
708 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100709 dataLayout,
710 qScale,
711 qOffset,
712 padLeft,
713 padTop,
714 padRight,
715 padBottom,
716 strideX,
717 strideY);
718}
719
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Convolves the shared 3-channel 16x8 test image with two 3-channel 3x5
    // kernels and (optionally) a 2-element bias, comparing against a
    // precomputed result.

    // Use common single-batch 3-channel 16x8 image (NCHW { 1, 3, 8, 16 }).
    armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
    std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);

    // Use a 2-element batch with 3-channel 3x5 kernels (NCHW { 2, 3, 5, 3 }).
    // Values are listed kernel-by-kernel, channel-by-channel, row-by-row.
    armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>({
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        },
        qScale, qOffset);

    // Expected output is 1 batch of a 2-channel 4x14 image
    // (NCHW { 1, 2, 4, 14 }): one output channel per kernel in the batch.
    armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType);
    std::vector<T> expectedOutput = QuantizedVector<T>({
            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
            -23.5f, -23.5f, -23.5f,

            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        },
        qScale, qOffset);

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        qScale,
        qOffset,
        layout);
}
808
809template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
810 typename T = armnn::ResolveType<ArmnnType>>
811LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
812 armnn::IWorkloadFactory& workloadFactory,
813 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100814 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100815 float qScale,
816 int32_t qOffset,
817 bool biasEnabled,
818 const armnn::DataLayout layout)
819{
820 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
821
822 // Use common single-batch 3-channel 16x8 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100823 armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
824 std::vector<unsigned int> inputShape = { 1, 3, 8, 16 };
825 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100826
827 // Use a 2-element batch of 3-channel 3x3 kernels.
Sadik Armagan483c8112021-06-01 09:24:52 +0100828 armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType);
829 std::vector<T> kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100830 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100831 1, -1, 1,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100832 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100833
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100834 0, 0, 0,
835 0, 0, 0,
836 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100837
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100838 2, 2, 2,
839 2, 2, 2,
840 2, 2, 2,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100841
842
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100843 0, 0, 0,
844 0, 0, 0,
845 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100846
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100847 1, 1, 1,
848 1, 1, 1,
849 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100850
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100851 0, 0, 0,
852 0, 0, 0,
853 0, 0, 0
854 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100855 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100856
857 // Expected output is 1 batch of a 2-channel 14x6 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100858 armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType);
859 std::vector<T> expectedOutput = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100860 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
861 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
862 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
863 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
864 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
865 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
866
867 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
868 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
869 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
870 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
871 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
872 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100873 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100874 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100875
876 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
877 workloadFactory,
878 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100879 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100880 input,
881 kernel,
882 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
883 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100884 inputDesc.GetShape(),
885 kernelDesc.GetShape(),
886 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100887 qScale,
888 qOffset,
889 layout);
890}
891
892template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
893 typename T = armnn::ResolveType<ArmnnType>>
894LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
895 armnn::IWorkloadFactory& workloadFactory,
896 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100897 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100898 const armnn::DataLayout layout,
899 float qScale,
900 int32_t qOffset)
901{
902 // Use a single-batch 1-channel 3x3 image as input.
Sadik Armagan483c8112021-06-01 09:24:52 +0100903 armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType);
904 std::vector<T> input =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100905 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100906 11,21,31,
907 12,22,32,
908 13,23,33
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100909 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100910 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100911
912 // Use 1 batch of a 1-channel 2x2 kernel.
Sadik Armagan483c8112021-06-01 09:24:52 +0100913 armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType);
914 std::vector<T> kernel =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100915 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100916 -11,-21,
917 -12,-22,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100918 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100919 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100920
921// Expected output is 1 batch of a 1-channel 6x8 image.
922// Manually calculated like this:
923//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
924//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
925//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
926//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
927//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
928//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
929//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Sadik Armagan483c8112021-06-01 09:24:52 +0100930 armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType);
931 std::vector<T> expectedOutput =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100932 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100933 0, 0, 0, 0, 0, 0,
934 -242, -594, -934, -372, 0, 0,
935 -495, -1190, -1850, -725, 0, 0,
936 -538, -1256, -1916, -748, 0, 0,
937 -273, -626, -946, -363, 0, 0,
938 0, 0, 0, 0, 0, 0,
939 0, 0, 0, 0, 0, 0,
940 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100941 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100942 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100943
944 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
945 workloadFactory,
946 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100947 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100948 input,
949 kernel,
950 GetBias2<ArmnnBType>(false, qScale * qScale),
951 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100952 inputDesc.GetShape(),
953 kernelDesc.GetShape(),
954 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100955 qScale,
956 qOffset,
957 layout,
958 1, // Padding left.
959 2, // Padding top.
960 3, // Padding right.
961 4); // Padding bottom.
962}
963
964template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
965 typename T = armnn::ResolveType<ArmnnType>>
966LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
967 armnn::IWorkloadFactory& workloadFactory,
968 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100969 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100970 const armnn::DataLayout layout,
971 float qScale,
972 int32_t qOffset)
973{
974 // Use a single-batch 1-channel 5x5 image as input.
975 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +0100976 std::vector<T> input =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100977 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100978 11,21,31,41,51,
979 12,22,32,42,52,
980 13,23,33,43,53,
981 14,24,34,44,54,
982 15,25,35,45,55,
Sadik Armagan483c8112021-06-01 09:24:52 +0100983 }, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100984
985 // Use 1 batch of a 1-channel 4x4 kernel.
986 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +0100987 std::vector<T> kernel =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100988 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100989 -11,-21,-31,-41,
990 -12,-22,-32,-42,
991 -13,-23,-33,-43,
992 -14,-24,-34,-44,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100993 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100994 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100995
996 // Expected output is 1 batch of a 1-channel 5x5 image.
997 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +0100998 std::vector<T> expectedOutput =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100999 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001000 -7140, -10580, -13940, -9300, -5230,
1001 -9590, -14120, -18520, -12290, -6860,
1002 -9980, -14560, -18960, -12560, -7000,
1003 -7518, -10904, -14144, -9318, -5152,
1004 -5032, -7256, -9376, -6142, -3368,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001005 },
Sadik Armagan483c8112021-06-01 09:24:52 +01001006 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001007
1008 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1009 workloadFactory,
1010 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001011 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001012 input,
1013 kernel,
1014 GetBias2<ArmnnBType>(false, qScale * qScale),
1015 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01001016 inputDesc.GetShape(),
1017 kernelDesc.GetShape(),
1018 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001019 qScale,
1020 qOffset,
1021 layout,
1022 1, // Padding left.
1023 1, // Padding top.
1024 2, // Padding right.
1025 2); // Padding bottom.
1026}
1027
1028template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1029LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
1030 armnn::IWorkloadFactory& workloadFactory,
1031 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001032 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001033 const std::vector<float>& inputNoQuantizedValues,
1034 armnn::TensorInfo& inputTensorInfo,
1035 const std::vector<float>& kernelNoQuantizedValues,
1036 armnn::TensorInfo& kernelTensorInfo,
1037 const std::vector<float>& outputExpectedNoQuantizedValues,
1038 armnn::TensorInfo& outputTensorInfo,
1039 uint32_t dilationX,
1040 uint32_t dilationY,
1041 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1042 uint32_t padLeft = 0,
1043 uint32_t padTop = 0,
1044 uint32_t padRight = 0,
1045 uint32_t padBottom = 0,
1046 uint32_t strideX = 1,
1047 uint32_t strideY = 1,
1048 bool biasEnabled = false
1049)
1050{
1051 float qScale;
1052 int32_t qOffset;
1053 switch (ArmnnType)
1054 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00001055 case armnn::DataType::QAsymmU8:
Sadik Armagan303980c2020-04-17 12:45:14 +01001056 case armnn::DataType::QAsymmS8:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001057 {
1058 qScale = 0.1f;
1059 qOffset = 128;
1060 break;
1061 }
Derek Lambertif90c56d2020-01-10 17:14:08 +00001062 case armnn::DataType::QSymmS16:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001063 {
1064 qScale = 0.1f;
1065 qOffset = 0;
1066 break;
1067 }
1068 case armnn::DataType::Float32:
1069 default:
1070 {
1071 qScale = 0.f;
1072 qOffset = 0;
1073 break;
1074 }
1075 }
1076
1077 inputTensorInfo.SetQuantizationScale(qScale);
1078 inputTensorInfo.SetQuantizationOffset(qOffset);
1079 kernelTensorInfo.SetQuantizationScale(qScale);
1080 kernelTensorInfo.SetQuantizationOffset(qOffset);
1081 outputTensorInfo.SetQuantizationScale(qScale);
1082 outputTensorInfo.SetQuantizationOffset(qOffset);
1083
Sadik Armagan483c8112021-06-01 09:24:52 +01001084 auto input = QuantizedVector<T>(inputNoQuantizedValues,
1085 inputTensorInfo.GetQuantizationScale(),
1086 inputTensorInfo.GetQuantizationOffset());
1087 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
1088 kernelTensorInfo.GetQuantizationScale(),
1089 kernelTensorInfo.GetQuantizationOffset());
1090 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
1091 outputTensorInfo.GetQuantizationScale(),
1092 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001093
1094 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1095 workloadFactory,
1096 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001097 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001098 input,
1099 kernel,
1100 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1101 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01001102 inputTensorInfo.GetShape(),
1103 kernelTensorInfo.GetShape(),
1104 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001105 qScale,
1106 qOffset,
1107 layout,
1108 padLeft,
1109 padTop,
1110 padRight,
1111 padBottom,
1112 strideX,
1113 strideY,
1114 dilationX,
1115 dilationY);
1116}
1117
1118template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1119LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
1120 armnn::IWorkloadFactory& workloadFactory,
1121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001122 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001123 bool biasEnabled,
1124 const armnn::DataLayout layout)
1125{
Sadik Armagan483c8112021-06-01 09:24:52 +01001126 armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001127 std::vector<float> inputNoQuantizedValues =
1128 {
1129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1130 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1131 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1132 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1133 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1134 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1135 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1136 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1137 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1138 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1139 };
1140
1141 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1142 std::vector<float> kernelNoQuantizedValues =
1143 {
1144 1, 2, 3,
1145 4, 5, 6,
1146 7, 8, 9
1147 };
1148
1149 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1150 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1151 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1152 std::vector<float> outputExpectedNoQuantizedValues =
1153 {
1154 6., 5., 5., 5.,
1155 6., 5., 5., 5.,
1156 6., 5., 5., 5.,
1157 3., 2., 2., 2.
1158 };
1159
1160 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1161 workloadFactory,
1162 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001163 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001164 inputNoQuantizedValues,
1165 inputTensorInfo,
1166 kernelNoQuantizedValues,
1167 kernelTensorInfo,
1168 outputExpectedNoQuantizedValues,
1169 outputTensorInfo,
1170 3,
1171 3,
1172 layout,
1173 biasEnabled);
1174}
1175
1176template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1177LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
1178 armnn::IWorkloadFactory& workloadFactory,
1179 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001180 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001181 bool biasEnabled,
1182 const armnn::DataLayout layout)
1183{
Sadik Armagan483c8112021-06-01 09:24:52 +01001184 armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001185 std::vector<float> inputNoQuantizedValues =
1186 {
1187 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1190 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1191 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1192 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1193 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1194 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1196 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1197
1198 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1199 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1200 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1201 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1202 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1203 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1205 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1206 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1207 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1208 };
1209
Sadik Armagan483c8112021-06-01 09:24:52 +01001210 armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001211 std::vector<float> kernelNoQuantizedValues =
1212 {
1213 1, 2, 3,
1214 4, 5, 6,
1215 7, 8, 9,
1216
1217 1, 2, 3,
1218 4, 5, 6,
1219 7, 8, 9
1220 };
1221
1222 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1223 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
Sadik Armagan483c8112021-06-01 09:24:52 +01001224 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001225 std::vector<float> outputExpectedNoQuantizedValues =
1226 {
1227 12., 10., 10., 10.,
1228 12., 10., 10., 10.,
1229 12., 10., 10., 10.,
1230 6., 4., 4., 4.
1231 };
1232
1233 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1234 workloadFactory,
1235 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001236 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001237 inputNoQuantizedValues,
1238 inputTensorInfo,
1239 kernelNoQuantizedValues,
1240 kernelTensorInfo,
1241 outputExpectedNoQuantizedValues,
1242 outputTensorInfo,
1243 3,
1244 3,
1245 layout,
1246 biasEnabled);
1247}
1248
1249template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1250LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
Sadik Armagan483c8112021-06-01 09:24:52 +01001251 armnn::IWorkloadFactory& workloadFactory,
1252 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001253 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001254 bool biasEnabled,
1255 const armnn::DataLayout layout)
1256{
Sadik Armagan483c8112021-06-01 09:24:52 +01001257 armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001258 std::vector<float> inputNoQuantizedValues =
1259 {
1260 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1261 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1262 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1263 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1264 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1265 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1266 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1267 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1268 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1269 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1270 };
1271
Sadik Armagan483c8112021-06-01 09:24:52 +01001272 armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001273 std::vector<float> kernelNoQuantizedValues =
1274 {
1275 1, 2,
1276 3, 4
1277 };
1278
1279 // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
1280 // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
1281 // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
1282 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1283 std::vector<float> outputExpectedNoQuantizedValues =
1284 {
1285 4, 7, 7, 3,
1286 6, 10, 10, 4,
1287 6, 10, 10, 4,
1288 2, 3, 3, 1
1289 };
1290 uint32_t padLeft = 1;
1291 uint32_t padTop = 1;
1292 uint32_t padRight = 1;
1293 uint32_t padBottom = 1;
1294
1295 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1296 workloadFactory,
1297 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001298 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001299 inputNoQuantizedValues,
1300 inputTensorInfo,
1301 kernelNoQuantizedValues,
1302 kernelTensorInfo,
1303 outputExpectedNoQuantizedValues,
1304 outputTensorInfo,
1305 2,
1306 2,
1307 layout,
1308 padLeft,
1309 padTop,
1310 padRight,
1311 padBottom,
1312 3,
1313 3,
1314 biasEnabled
1315 );
1316}
1317
1318template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1319LayerTestResult<T,4> CompareConvolution2dTestImpl(
1320 armnn::IWorkloadFactory& workloadFactory,
1321 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001322 armnn::IWorkloadFactory& refWorkloadFactory,
1323 const armnn::ITensorHandleFactory& tensorHandleFactory,
1324 const armnn::ITensorHandleFactory& refTensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001325{
1326 unsigned int inputHeight = 8;
1327 unsigned int inputWidth = 16;
1328 unsigned int inputChannels = 3;
1329 unsigned int inputNum = 5;
1330
1331 unsigned int kernelHeight = 3;
1332 unsigned int kernelWidth = 3;
1333
1334 unsigned int strideX = 2;
1335 unsigned int strideY = 3;
1336 unsigned int padX = 1;
1337 unsigned int padY = 1;
1338
1339 unsigned int outputNum = inputNum;
1340 unsigned int outputChannels = 2;
1341 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
1342 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
1343
1344 armnn::TensorInfo inputTensorInfo;
1345 armnn::TensorInfo outputTensorInfo;
1346 armnn::TensorInfo kernelDesc;
1347 armnn::TensorInfo biasDesc;
1348
1349 unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
1350 unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
1351 unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
1352 unsigned int biasShape[] = {outputChannels};
1353
1354 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
1355 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
1356 kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
1357 biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
1358
Sadik Armagan483c8112021-06-01 09:24:52 +01001359 auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
1360 auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
1361 auto bias = MakeRandomTensor<T>(biasDesc, 1028);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001362
Sadik Armagan483c8112021-06-01 09:24:52 +01001363 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1364 std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01001365
1366 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1367 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1368
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001369 armnn::Convolution2dQueueDescriptor data;
1370 armnn::WorkloadInfo info;
James Conroy1f58f032021-04-27 17:13:27 +01001371 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
1372 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001373
Sadik Armagan483c8112021-06-01 09:24:52 +01001374 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
1375 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001376
1377 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1378 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1379 data.m_Weight = &weightsTensor;
1380 data.m_Bias = &biasTensor;
1381 data.m_Parameters.m_StrideX = strideX;
1382 data.m_Parameters.m_StrideY = strideY;
1383 data.m_Parameters.m_PadLeft = padX;
1384 data.m_Parameters.m_PadRight = padX;
1385 data.m_Parameters.m_PadTop = padY;
1386 data.m_Parameters.m_PadBottom = padY;
1387 data.m_Parameters.m_BiasEnabled = true;
Keith Davisf500d6c2020-08-31 08:32:55 +01001388
1389 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1390 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1391
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001392 armnn::Convolution2dQueueDescriptor refData = data;
Sadik Armagan483c8112021-06-01 09:24:52 +01001393 armnn::WorkloadInfo refInfo = info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001394 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1395 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1396
Teresa Charlin611c7fb2022-01-07 09:47:29 +00001397 std::unique_ptr<armnn::IWorkload> workload
1398 = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, data, info);
1399 std::unique_ptr<armnn::IWorkload> workloadRef
1400 = refWorkloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, refData, refInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001401
1402 outputHandleRef->Allocate();
1403 inputHandleRef->Allocate();
1404
1405 inputHandle->Allocate();
1406 outputHandle->Allocate();
1407
Sadik Armagan483c8112021-06-01 09:24:52 +01001408 CopyDataToITensorHandle(inputHandle.get(), input.data());
1409 CopyDataToITensorHandle(inputHandleRef.get(), input.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001410
1411 ExecuteWorkload(*workload, memoryManager);
1412
1413 workloadRef->PostAllocationConfigure();
1414 workloadRef->Execute();
1415
Sadik Armagan483c8112021-06-01 09:24:52 +01001416 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1417 CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001418
Sadik Armagan483c8112021-06-01 09:24:52 +01001419 return LayerTestResult<T, 4>(actualOutput,
1420 expectedOutput,
1421 outputHandle->GetShape(),
1422 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001423}
1424
//
// Convolution2d: 1x5x5x1 BFloat16 input convolved with a 3x3 BFloat16 kernel at stride 2x2
// with 1-pixel padding on every edge, producing a 1x3x3x1 Float32 output. Exercises the
// mixed-precision (BFloat16 in, Float32 out) convolution path with large input magnitudes.
// Note: biasEnabled is part of the shared test signature but is deliberately ignored here -
// an empty bias vector is passed to SimpleConvolution2dNhwcTestImpl below.
//
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // BFloat16 input and weight, Float32 output
    armnn::IgnoreUnused(biasEnabled);

    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16);

    // The trailing comment on each value appears to be its representation after rounding
    // to BFloat16 - TODO confirm.
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            10.0367984f,  // 10.0625
            2.0380895f,   // 2.03125
            15.0420157f,  // 15.0625
            22.0675631f,  // 22.125
            8.0938920f,   // 8.125
            5.0476106f,   // 5.0625
            80.1035490f,  // 80
            100.1260370f, // 100
            55.0461647f,  // 55
            120.0883828f, // 120
            9.1159540f,   // 9.125
            90.0498519f,  // 90
            200.0104630f, // 200
            30.0154114f,  // 30
            75.00137681f, // 75
            30.0344238f,  // 30
            25.0356445f,  // 25
            130.0495605f, // 130
            60.0683594f,  // 60
            35.0991211f,  // 35
            8.0461426f,   // 8.0625
            12.0996094f,  // 12.125
            98.1269530f,  // 98
            125.0393066f, // 125
            5.103516f     // 5.0937
        },
        1.0f, 0);

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);

    std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -0.126184f,  // -0.125977
            -0.150468f,  // -0.150391
            -0.101412f,  // -0.101562
            -0.0586369f, // -0.0585938
            -0.0865864f, // -0.0864258
            -0.0435089f, // -0.043457
            0.0347555f,  // 0.034668
            0.0323111f,  // 0.0322266
            0.0385381f   // 0.0385742
        },
        1.0f, 0);

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32);

    // Expected output (with results if calculated as FP32 in the comments)
    const std::vector<float> outputData =
        {
            2.296875f,  // 2.29240716
            5.75f,      // 5.75851926
            3.78125f,   // 3.79855026
            -11.625f,   // -11.65498118
            -47.25f,    // -47.27316893
            -30.0f,     // -30.04771684
            -8.25f,     // -8.28126168
            -43.5f,     // -43.46531337
            -20.625f    // -20.63477281
        };

    // One pixel of padding on every edge; stride 2 in both dimensions maps 5x5 -> 3x3.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl
        <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputValues,
        kernelValues,
        std::vector<float>(), // empty bias: biasEnabled is intentionally ignored above
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        1.0f,
        0,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
1531
//
// Same configuration as Convolution2d3x3Stride2x2BFloat16Test (1x5x5x1 input, 3x3 kernel,
// stride 2x2, 1-pixel padding, BFloat16 in / Float32 out) but with all input magnitudes
// below 1.0, exercising BFloat16 rounding behaviour on small values.
// Note: biasEnabled is part of the shared test signature but is deliberately ignored here -
// an empty bias vector is passed to SimpleConvolution2dNhwcTestImpl below.
//
LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout& dataLayout)
{
    // BFloat16 input and weight, Float32 output
    armnn::IgnoreUnused(biasEnabled);

    // Input is a single-batch, 1 channel, 5x5 image.
    armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);

    // The trailing comment on each value appears to be its representation after rounding
    // to BFloat16 - TODO confirm.
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            0.0367984f,  // 0.0368652
            0.0380895f,  // 0.0380859
            0.0420157f,  // 0.0419922
            0.0675631f,  // 0.0673828
            0.0938920f,  // 0.09375
            0.0476106f,  // 0.0476074
            0.1035490f,  // 0.103516
            0.1260370f,  // 0.125977
            0.0461647f,  // 0.0461426
            0.0883828f,  // 0.0883789
            0.1159540f,  // 0.115723
            0.0498519f,  // 0.0498047
            0.0104630f,  // 0.010437
            0.0154114f,  // 0.0154419
            0.00137681f, // 0.00137329
            0.0344238f,  // 0.0344616
            0.0356445f,  // 0.0355693
            0.0495605f,  // 0.0495018
            0.0683594f,  // 0.0683308
            0.0991211f,  // 0.0988837
            0.0461426f,  // 0.0461838
            0.0996094f,  // 0.0997546
            0.1269530f,  // 0.127099
            0.0393066f,  // 0.0392791
            0.103516f    // 0.103641
        },
        1.0f, 0);

    // Use a 3x3 kernel.
    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);

    std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -0.126184f,  // -0.125977
            -0.150468f,  // -0.150391
            -0.101412f,  // -0.101562
            -0.0586369f, // -0.0585938
            -0.0865864f, // -0.0864258
            -0.0435089f, // -0.043457
            0.0347555f,  // 0.034668
            0.0323111f,  // 0.0322266
            0.0385381f   // 0.0385742
        },
        1.0f, 0);

    // Expected output is a single-batch, 1 channel, 3x3 image.
    armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);

    // Expected output (with results if calculated as FP32 in the comments)
    const std::vector<float> outputData =
        {
            0.000686645508f, // 0.000685
            0.000640869141f, // 0.000639
            -0.00759887695f, // -0.007631
            -0.02734375f,    // -0.027388
            -0.0356445312f,  // -0.035737
            -0.0145874023f,  // -0.014568
            -0.0170898438f,  // -0.017124
            -0.0373535156f,  // -0.037431
            -0.0346679688f   // -0.034808
        };

    // One pixel of padding on every edge; stride 2 in both dimensions maps 5x5 -> 3x3.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;
    uint32_t strideX = 2;
    uint32_t strideY = 2;

    return SimpleConvolution2dNhwcTestImpl
        <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputValues,
        kernelValues,
        std::vector<float>(), // empty bias: biasEnabled is intentionally ignored above
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        1.0f,
        0,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY);
}
1638
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001639//
1640// DepthwiseConvolution2d implementations
1641//
1642
1643template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1644 typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
1645LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
1646 armnn::IWorkloadFactory& workloadFactory,
1647 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001648 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001649 const std::vector<T>& input,
1650 const std::vector<T>& kernel,
1651 const std::vector<B>& bias,
1652 const std::vector<T>& outputExpected,
1653 const armnn::TensorShape& inputShape,
1654 const armnn::TensorShape& kernelShape,
1655 const armnn::TensorShape& outputExpectedShape,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001656 float qScale,
1657 int32_t qOffset,
1658 const armnn::DataLayout layout,
1659 uint32_t padLeft = 0,
1660 uint32_t padTop = 0,
1661 uint32_t padRight = 0,
1662 uint32_t padBottom = 0,
1663 uint32_t strideX = 1,
1664 uint32_t strideY = 1)
1665{
Sadik Armagan483c8112021-06-01 09:24:52 +01001666 unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
1667 unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[1]);
1668 unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
1669 unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
Jan Eilers53ef7952021-06-02 12:01:25 +01001670 unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
1671 unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
1672 unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
Sadik Armagan483c8112021-06-01 09:24:52 +01001673 unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
1674 unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
1675 unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
1676 unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001677
1678 // If a bias is used, its size must equal the number of output channels.
1679 bool biasEnabled = bias.size() > 0;
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001680 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001681
1682 // Creates the tensors.
1683 armnn::TensorInfo inputTensorInfo =
1684 armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1685 armnn::TensorInfo outputTensorInfo =
1686 armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Jan Eilers53ef7952021-06-02 12:01:25 +01001687 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001688 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
1689
1690 // Set quantization parameters if the requested type is a quantized type.
1691 if (armnn::IsQuantizedType<T>())
1692 {
1693 inputTensorInfo.SetQuantizationScale(qScale);
1694 inputTensorInfo.SetQuantizationOffset(qOffset);
1695 outputTensorInfo.SetQuantizationScale(qScale);
1696 outputTensorInfo.SetQuantizationOffset(qOffset);
1697 kernelDesc.SetQuantizationScale(qScale);
1698 kernelDesc.SetQuantizationOffset(qOffset);
1699 biasDesc.SetQuantizationScale(qScale*qScale);
1700 biasDesc.SetQuantizationOffset(0);
1701 }
1702
1703 // Construct the input data.
1704 std::vector<T> inputData;
1705 inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
1706
1707 // At this point if we require it permute the input data
1708 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1709 if (layout == armnn::DataLayout::NHWC)
1710 {
1711 std::vector<T> tmp(inputData.size());
1712 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1713 inputData = tmp;
1714 }
1715
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001716 // Construct the output data, with bias applied, as appropriate.
1717 std::vector<T> outputData;
1718 outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
1719 if (biasEnabled)
1720 {
1721 std::vector<T> biasV;
1722 biasV.assign(bias.data(), bias.data() + outputChannels);
1723 ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1724 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1725 outputWidth, outputHeight);
1726 }
1727
Sadik Armagan483c8112021-06-01 09:24:52 +01001728 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001729
1730 // At this point if we require it permute the expected output
1731 if (layout == armnn::DataLayout::NHWC)
1732 {
1733 std::vector<T> tmp(outputData.size());
1734 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
1735 outputData = tmp;
1736 }
1737
Keith Davisf500d6c2020-08-31 08:32:55 +01001738 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1739 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1740
James Conroy1f58f032021-04-27 17:13:27 +01001741 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001742
Sadik Armagan483c8112021-06-01 09:24:52 +01001743 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001744
James Conroy1f58f032021-04-27 17:13:27 +01001745 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001746 if (biasEnabled)
1747 {
Sadik Armagan483c8112021-06-01 09:24:52 +01001748 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001749 }
1750
1751 armnn::DepthwiseConvolution2dQueueDescriptor data;
1752 data.m_Weight = &weightsTensor;
1753 data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
1754 data.m_Parameters.m_StrideX = strideX;
1755 data.m_Parameters.m_StrideY = strideY;
1756 data.m_Parameters.m_PadLeft = padLeft;
1757 data.m_Parameters.m_PadRight = padRight;
1758 data.m_Parameters.m_PadTop = padTop;
1759 data.m_Parameters.m_PadBottom = padBottom;
1760 data.m_Parameters.m_BiasEnabled = biasEnabled;
1761 data.m_Parameters.m_DataLayout = layout;
1762
1763 armnn::WorkloadInfo info;
1764 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1765 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1766
Teresa Charlin611c7fb2022-01-07 09:47:29 +00001767 std::unique_ptr<armnn::IWorkload> workload
1768 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001769 inputHandle->Allocate();
1770 outputHandle->Allocate();
1771
Sadik Armagan483c8112021-06-01 09:24:52 +01001772 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001773
1774 ExecuteWorkload(*workload, memoryManager);
1775
Sadik Armagan483c8112021-06-01 09:24:52 +01001776 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001777
Sadik Armagan483c8112021-06-01 09:24:52 +01001778 return LayerTestResult<T, 4>(actualOutput,
1779 outputData,
1780 outputHandle->GetShape(),
1781 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001782}
1783
1784template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1785LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
1786 armnn::IWorkloadFactory& workloadFactory,
1787 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001788 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001789 float qScale,
1790 int32_t qOffset,
1791 bool biasEnabled,
1792 const armnn::DataLayout layout)
1793{
1794 using B = armnn::ResolveType<ArmnnBType>;
1795
1796 unsigned int inputHeight = 3;
1797 unsigned int inputWidth = 3;
1798 unsigned int inputChannels = 2;
1799 unsigned int inputNum = 1;
1800
1801 unsigned int kernelHeight = 3;
1802 unsigned int kernelWidth = 3;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001803
1804 unsigned int outputHeight = 1;
1805 unsigned int outputWidth = 1;
Jan Eilers53ef7952021-06-02 12:01:25 +01001806 unsigned int outputChannels = inputChannels;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001807 unsigned int outputNum = inputNum;
1808
1809 armnn::TensorInfo inputTensorInfo =
1810 armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1811 armnn::TensorInfo outputTensorInfo =
1812 armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Jan Eilers53ef7952021-06-02 12:01:25 +01001813 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001814 ArmnnType);
1815 armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
1816
1817 // Set quantization parameters if the requested type is a quantized type.
1818 if(armnn::IsQuantizedType<T>())
1819 {
1820 inputTensorInfo.SetQuantizationScale(qScale);
1821 inputTensorInfo.SetQuantizationOffset(qOffset);
1822 outputTensorInfo.SetQuantizationScale(qScale);
1823 outputTensorInfo.SetQuantizationOffset(qOffset);
1824 kernelDesc.SetQuantizationScale(qScale);
1825 kernelDesc.SetQuantizationOffset(qOffset);
1826 biasDesc.SetQuantizationScale(qScale*qScale);
1827 biasDesc.SetQuantizationOffset(0);
1828 }
1829 std::vector<T> inputData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001830 QuantizedVector<T>({
1831 1.f, 2.f, 1.f,
1832 2.f, 1.f, 2.f,
1833 1.f, 2.f, 1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001834
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001835 1.f, 2.f, 1.f,
1836 2.f, 1.f, 2.f,
1837 1.f, 2.f, 1.f,
1838 },
1839 inputTensorInfo.GetQuantizationScale(),
1840 inputTensorInfo.GetQuantizationOffset()));
1841
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001842 // at this point if we require it permute the input data
1843 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1844 if (layout == armnn::DataLayout::NHWC)
1845 {
1846 std::vector<T> tmp(inputData.size());
1847 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1848 inputData = tmp;
1849 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001850
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001851 std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1852 biasDesc.GetQuantizationScale(),
1853 biasDesc.GetQuantizationOffset()));
1854
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001855 std::vector<T> kernelData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001856 QuantizedVector<T>({
1857 1.f, 0.f, 1.f,
1858 0.f, 0.f, 0.f,
1859 -1.f, 0.f, -1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001860
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001861 1.f, 0.f, 1.f,
1862 0.f, 0.f, 0.f,
1863 -1.f, 0.f, -1.f,
1864 },
1865 kernelDesc.GetQuantizationScale(),
1866 kernelDesc.GetQuantizationOffset()));
1867
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001868 // Manually calculated.
1869 std::vector<T> outputImage(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001870 QuantizedVector<T>({ 0.f, 0.f },
1871 outputTensorInfo.GetQuantizationScale(),
1872 outputTensorInfo.GetQuantizationOffset())
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001873 );
1874
1875 // Optionally apply bias to output image.
1876 if(biasEnabled)
1877 {
1878 ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1879 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1880 outputWidth, outputHeight);
1881 }
1882
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001883 if (layout == armnn::DataLayout::NHWC)
1884 {
1885 std::vector<T> tmp(outputImage.size());
1886 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
1887 outputImage = tmp;
1888 }
1889
Sadik Armagan483c8112021-06-01 09:24:52 +01001890 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01001891
1892 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1893 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1894
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001895 armnn::DepthwiseConvolution2dQueueDescriptor data;
1896 armnn::WorkloadInfo info;
James Conroy1f58f032021-04-27 17:13:27 +01001897 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
1898 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001899
Sadik Armagan483c8112021-06-01 09:24:52 +01001900 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
1901 AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001902
1903 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1904 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1905
1906 data.m_Weight = &weightsTensor;
1907 data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
1908 data.m_Parameters.m_StrideX = 1;
1909 data.m_Parameters.m_StrideY = 1;
1910 data.m_Parameters.m_PadLeft = 0;
1911 data.m_Parameters.m_PadRight = 0;
1912 data.m_Parameters.m_PadTop = 0;
1913 data.m_Parameters.m_PadBottom = 0;
1914 data.m_Parameters.m_BiasEnabled = biasEnabled;
1915 data.m_Parameters.m_DataLayout = layout;
1916
Teresa Charlin611c7fb2022-01-07 09:47:29 +00001917 std::unique_ptr<armnn::IWorkload> workload
1918 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001919 inputHandle->Allocate();
1920 outputHandle->Allocate();
1921
Sadik Armagan483c8112021-06-01 09:24:52 +01001922 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001923
1924 ExecuteWorkload(*workload, memoryManager);
1925
Sadik Armagan483c8112021-06-01 09:24:52 +01001926 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001927
Sadik Armagan483c8112021-06-01 09:24:52 +01001928 return LayerTestResult<T, 4>(actualOutput,
1929 outputImage,
1930 outputHandle->GetShape(),
1931 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001932}
1933
1934template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1935LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
1936 armnn::IWorkloadFactory& workloadFactory,
1937 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001938 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001939 float qScale,
1940 int32_t qOffset,
1941 bool biasEnabled,
1942 const armnn::DataLayout layout)
1943{
1944 using B = armnn::ResolveType<ArmnnBType>;
1945
1946 unsigned int depthMultiplier = 2;
1947
1948 unsigned int inputHeight = 8;
1949 unsigned int inputWidth = 16;
1950 unsigned int inputChannels = 2;
1951 unsigned int inputBatchSize = 1;
1952
1953 unsigned int kernelHeight = 5;
1954 unsigned int kernelWidth = 3;
1955
1956 unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
1957 unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
1958 unsigned int outputChannels = inputChannels * depthMultiplier;
1959 unsigned int outputBatchSize = inputBatchSize;
1960
1961 armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
1962 inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1963 armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
1964 outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Jan Eilers53ef7952021-06-02 12:01:25 +01001965 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001966 ArmnnType);
1967 armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
1968
1969 // Set quantization parameters if the requested type is a quantized type.
1970 if(armnn::IsQuantizedType<T>())
1971 {
1972 inputTensorInfo.SetQuantizationScale(qScale);
1973 inputTensorInfo.SetQuantizationOffset(qOffset);
1974 outputTensorInfo.SetQuantizationScale(qScale);
1975 outputTensorInfo.SetQuantizationOffset(qOffset);
1976 kernelDesc.SetQuantizationScale(qScale);
1977 kernelDesc.SetQuantizationOffset(qOffset);
1978 biasDesc.SetQuantizationScale(qScale*qScale);
1979 biasDesc.SetQuantizationOffset(0);
1980 }
1981
1982 // NOTE: originalInputData is in NCHW format
1983 std::vector<T> originalInputData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001984 QuantizedVector<T>({
1985 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1986 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1987 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1988 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1989 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1990 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1991 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1992 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1993 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1994 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1995 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1996 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1997 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1998 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1999 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
2000 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
2001 },
2002 inputTensorInfo.GetQuantizationScale(),
2003 inputTensorInfo.GetQuantizationOffset()));
2004
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002005 std::vector<T> inputData = originalInputData;
2006 // at this point if we require it permute the input data
2007 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
2008 if (layout == armnn::DataLayout::NHWC)
2009 {
2010 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
2011 originalInputData.data(), inputData.data(), sizeof(T));
2012 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002013
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002014 std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
2015 biasDesc.GetQuantizationScale(),
2016 biasDesc.GetQuantizationOffset());
2017
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002018 std::vector<T> kernelData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002019 QuantizedVector<T>({
2020 1, 1, 1,
2021 1, -1, 1,
2022 1, 1, 1,
2023 1, 1, 1,
2024 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002025
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002026 2, 2, 2,
2027 2, 2, 2,
2028 2, 2, 2,
2029 2, 2, 2,
2030 2, 2, 2,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002031
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002032 0, 0, 0,
2033 0, -1, 0,
2034 0, 0, 0,
2035 0, 0, 0,
2036 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002037
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002038 0, 0, 0,
2039 0, 0, 0,
2040 0, 1, 0,
2041 0, 0, 0,
2042 0, 0, 0
2043 },
2044 kernelDesc.GetQuantizationScale(),
2045 kernelDesc.GetQuantizationOffset()));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002046
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002047 // Manually calculated.
2048 std::vector<T> originalOutputImage = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002049 QuantizedVector<T>({
Jan Eilers53ef7952021-06-02 12:01:25 +01002050 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2051 5, 5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5,
2052 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5, 5, 5, 5, 5, 5, 5,
2053 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5,
2054 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 6, 6, 6, 6, 6, 6, 6,
2055 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
2056 1, 3, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2057 2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2058 2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2059 2, 4, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2060 3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2061 3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002062 },
2063 outputTensorInfo.GetQuantizationScale(),
2064 outputTensorInfo.GetQuantizationOffset()));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002065
2066 // Optionally apply bias to output image.
2067 if(biasEnabled)
2068 {
2069 ApplyBias(originalOutputImage,
2070 outputTensorInfo.GetQuantizationScale(),
2071 outputTensorInfo.GetQuantizationOffset(),
2072 biasV,
2073 biasDesc.GetQuantizationScale(),
2074 biasDesc.GetQuantizationOffset(),
2075 outputWidth,
2076 outputHeight);
2077 }
2078
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002079 std::vector<T> outputImage = originalOutputImage;
2080 if (layout == armnn::DataLayout::NHWC)
2081 {
2082 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
2083 originalOutputImage.data(), outputImage.data(), sizeof(T));
2084 }
2085
Sadik Armagan483c8112021-06-01 09:24:52 +01002086 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01002087
2088 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2089 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2090
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002091 armnn::DepthwiseConvolution2dQueueDescriptor data;
2092 armnn::WorkloadInfo info;
James Conroy1f58f032021-04-27 17:13:27 +01002093 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2094 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002095
Sadik Armagan483c8112021-06-01 09:24:52 +01002096 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
2097 AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002098
2099 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2100 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2101
2102 data.m_Weight = &weightsTensor;
2103 data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
2104 data.m_Parameters.m_StrideX = 2;
2105 data.m_Parameters.m_StrideY = 1;
2106 data.m_Parameters.m_PadLeft = 0;
2107 data.m_Parameters.m_PadRight = 0;
2108 data.m_Parameters.m_PadTop = 1;
2109 data.m_Parameters.m_PadBottom = 1;
2110 data.m_Parameters.m_BiasEnabled = biasEnabled;
2111 data.m_Parameters.m_DataLayout = layout;
2112
Teresa Charlin611c7fb2022-01-07 09:47:29 +00002113 std::unique_ptr<armnn::IWorkload> workload
2114 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002115 inputHandle->Allocate();
2116 outputHandle->Allocate();
2117
Sadik Armagan483c8112021-06-01 09:24:52 +01002118 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002119
2120 ExecuteWorkload(*workload, memoryManager);
2121
Sadik Armagan483c8112021-06-01 09:24:52 +01002122 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002123
Sadik Armagan483c8112021-06-01 09:24:52 +01002124 return LayerTestResult<T, 4>(actualOutput,
2125 outputImage,
2126 outputHandle->GetShape(),
2127 outputTensorInfo.GetShape());
2128
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002129}
2130
// Generic driver for DepthwiseConvolution2d layer tests.
//
// Takes a single batch of input / expected-output data (NCHW order), duplicates it
// into two identical batches, optionally folds the bias into the expected output,
// runs a DepthwiseConvolution2d workload created by the given factory, and returns
// the actual vs. expected result for comparison by the caller.
//
// Parameters:
//  - originalInput / originalKernel / originalOutputExpected: test data in NCHW
//    element order; permuted internally when layout == NHWC.
//  - bias: optional bias data; an empty vector disables the bias
//    (biasEnabled is derived from bias.size() below).
//  - originalInputShape / originalOutputExpectedShape: single-batch [N,C,H,W] shapes.
//  - originalKernelShape: depthwise weights shape, indexed here as [1, H, W, I*M].
//  - qScale / qOffset: quantization parameters applied to input/output/weights
//    when T is a quantized type.
//  - layout: data layout the workload executes with (NCHW or NHWC).
//  - pad* / stride* / dilation*: convolution descriptor parameters.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,
    const std::vector<T>& originalKernel,
    const std::vector<B>& bias,
    const std::vector<T>& originalOutputExpected,
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    // Incoming shapes are NCHW: [0]=N, [1]=C, [2]=H, [3]=W.
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    // Kernel shape is indexed as [1, H, W, I*M].
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);

    // An empty bias vector means "no bias".
    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);


    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
            armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
            armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);

    // Kernel must be NCHW layout always, independently of the layout of the input and output for depthwise convolution.
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);

    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale; offset is always 0 for bias.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct input data: duplicate the single supplied batch into two.
    std::vector<T> input;
    input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), input.begin(), input.end());
    inputData.insert(inputData.end(), input.begin(), input.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    // Take a single batch of the expected output data.
    std::vector<T> output;
    output.assign(originalOutputExpected.data(),
                  originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output data if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data: duplicate the (biased) batch into two.
    std::vector<T> outputData;
    outputData.insert(outputData.end(), output.begin(), output.end());
    outputData.insert(outputData.end(), output.begin(), output.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, originalKernel.data());

    if(biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
    }

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
2296
2297template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2298 typename T = armnn::ResolveType<ArmnnType>>
2299LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
2300 armnn::IWorkloadFactory& workloadFactory,
2301 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002302 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002303 float qScale,
2304 int32_t qOffset,
2305 bool biasEnabled,
2306 const armnn::DataLayout layout)
2307{
2308 // Use a single-batch 2-channel 5x5 image as input.
2309 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002310 auto input = QuantizedVector<T>(
2311 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002312 0, 1, 2, 3, 4,
2313 5, 6, 7, 8, 9,
2314 10, 11, 12, 13, 14,
2315 15, 16, 17, 18, 19,
2316 20, 21, 22, 23, 24,
2317
2318 25, 26, 27, 28, 29,
2319 30, 31, 32, 33, 34,
2320 35, 36, 37, 38, 39,
2321 40, 41, 42, 43, 44,
2322 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002323 },
2324 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002325 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002326
2327 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Jan Eilers53ef7952021-06-02 12:01:25 +01002328 // Weights layout for depthwise: [1,H,W,I*M]
2329 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2330 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002331 32, 31, 30, 29,
2332 28, 27, 26, 25,
2333 24, 23, 22, 21,
2334 20, 19, 18, 17,
2335
2336 16, 15, 14, 13,
2337 12, 11, 10, 9,
2338 8, 7, 6, 5,
2339 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002340 },
2341 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002342 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002343
2344 // Expected output is 1 batch of a 2-channel 5x5 image.
2345 // Calculated using the python tensorflow library with strideX=1, strideY=1.
2346 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002347 auto expectedOutput = QuantizedVector<T>(
2348 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002349 396, 664, 820, 756, 602, 1016, 1608, 1880, 1652, 1268, 1976, 2968, 3240, 2732,
2350 2028, 2628, 3808, 4060, 3312, 2390, 2596, 3700, 3900, 3130, 2226, 2817, 4186,
2351 4330, 3609, 2651, 5414, 7864, 8120, 6626, 4780, 6314, 9144, 9400, 7646, 5500,
2352 6759, 9610, 9850, 7875, 5579, 5935, 8348, 8540, 6757, 4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002353 },
2354 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002355 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002356
2357 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2358 workloadFactory,
2359 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002360 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002361 input,
2362 kernel,
2363 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2364 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002365 inputTensorInfo.GetShape(),
2366 kernelTensorInfo.GetShape(),
2367 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002368 qScale,
2369 qOffset,
2370 layout,
2371 1, // Padding left.
2372 1, // Padding top.
2373 2, // Padding right.
2374 2, // Padding bottom.
2375 1, // strideX
2376 1); // strideY
2377}
2378
2379template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2380 typename T = armnn::ResolveType<ArmnnType>>
2381LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
2382 armnn::IWorkloadFactory& workloadFactory,
2383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002384 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002385 float qScale,
2386 int32_t qOffset,
2387 bool biasEnabled)
2388{
2389 auto layout = armnn::DataLayout::NHWC;
2390
2391 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002392 auto input = QuantizedVector<T>(
2393 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002394 0, 1, 2, 3, 4,
2395 5, 6, 7, 8, 9,
2396 10, 11, 12, 13, 14,
2397 15, 16, 17, 18, 19,
2398 20, 21, 22, 23, 24,
2399
2400 25, 26, 27, 28, 29,
2401 30, 31, 32, 33, 34,
2402 35, 36, 37, 38, 39,
2403 40, 41, 42, 43, 44,
2404 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002405 },
2406 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002407 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002408
Jan Eilers53ef7952021-06-02 12:01:25 +01002409 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2410 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002411 32, 31, 30, 29,
2412 28, 27, 26, 25,
2413 24, 23, 22, 21,
2414 20, 19, 18, 17,
2415
2416 16, 15, 14, 13,
2417 12, 11, 10, 9,
2418 8, 7, 6, 5,
2419 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002420 },
2421 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002422 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002423
2424 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002425 auto expectedOutput = QuantizedVector<T>(
2426 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002427 396,664,820,756,602,
2428 1016,1608,1880,1652,1268,
2429 1976,2968,3240,2732,2028,
2430 2628,3808,4060,3312,2390,
2431 2596,3700,3900,3130,2226,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002432
Jan Eilers53ef7952021-06-02 12:01:25 +01002433 2817,4186,4330,3609,2651,
2434 5414,7864,8120,6626,4780,
2435 6314,9144,9400,7646,5500,
2436 6759,9610,9850,7875,5579,
2437 5935,8348,8540,6757,4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002438 },
2439 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002440 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002441
2442 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2443 workloadFactory,
2444 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002445 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002446 input,
2447 kernel,
2448 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2449 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002450 inputTensorInfo.GetShape(),
2451 kernelTensorInfo.GetShape(),
2452 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002453 qScale,
2454 qOffset,
2455 layout,
2456 1, // Padding left.
2457 1, // Padding top.
2458 2, // Padding right.
2459 2, // Padding bottom.
2460 1, // strideX
2461 1); // strideY
2462}
2463
2464template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2465 typename T = armnn::ResolveType<ArmnnType>>
2466LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
2467 armnn::IWorkloadFactory& workloadFactory,
2468 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002469 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002470 float qScale,
2471 int32_t qOffset,
2472 bool biasEnabled)
2473{
2474 auto layout = armnn::DataLayout::NHWC;
2475
Sadik Armagan483c8112021-06-01 09:24:52 +01002476 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
2477 auto input = QuantizedVector<T>(
2478 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002479 0, 0, 0, 0, 0, 0, 0, 0, 0,
2480 0, 0, 0, 0, 0, 0, 0, 0, 0,
2481 0, 0, 0, 0, 0, 0, 0, 0, 0,
2482 0, 0, 0, 1, 1, 1, 0, 0, 0,
2483 0, 0, 0, 1, 1, 1, 0, 0, 0,
2484 0, 0, 0, 1, 1, 1, 0, 0, 0,
2485 0, 0, 0, 0, 0, 0, 0, 0, 0,
2486 0, 0, 0, 0, 0, 0, 0, 0, 0,
2487 0, 0, 0, 0, 0, 0, 0, 0, 0
2488 },
2489 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002490 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002491
Jan Eilers53ef7952021-06-02 12:01:25 +01002492 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
2493 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002494 1, 2, 3,
2495 4, 5, 6,
2496 7, 8, 9
2497 },
2498 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002499 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002500
2501 uint32_t padLeft = 0;
2502 uint32_t padTop = 0;
2503 uint32_t padRight = 0;
2504 uint32_t padBottom = 0;
2505 uint32_t strideX = 1;
2506 uint32_t strideY = 1;
2507 uint32_t dilationX = 3;
2508 uint32_t dilationY = 3;
2509
2510 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Sadik Armagan483c8112021-06-01 09:24:52 +01002511 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
2512 auto expectedOutput = QuantizedVector<T>(
2513 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002514 5, 5, 5,
2515 5, 5, 5,
2516 5, 5, 5
2517 },
2518 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002519 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002520
2521 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2522 workloadFactory,
2523 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002524 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002525 input,
2526 kernel,
2527 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2528 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002529 inputTensorInfo.GetShape(),
2530 kernelTensorInfo.GetShape(),
2531 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002532 qScale,
2533 qOffset,
2534 layout,
2535 padLeft,
2536 padTop,
2537 padRight,
2538 padBottom,
2539 strideX,
2540 strideY,
2541 dilationX,
2542 dilationY);
2543}
2544
2545template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
2546LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
2547 armnn::IWorkloadFactory& workloadFactory,
2548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002549 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002550 const std::vector<float>& inputNoQuantizedValues,
2551 armnn::TensorInfo& inputTensorInfo,
2552 const std::vector<float>& kernelNoQuantizedValues,
2553 armnn::TensorInfo& kernelTensorInfo,
2554 const std::vector<float>& outputExpectedNoQuantizedValues,
2555 armnn::TensorInfo& outputTensorInfo,
2556 uint32_t dilationX,
2557 uint32_t dilationY,
2558 armnn::DataLayout layout = armnn::DataLayout::NCHW,
2559 bool biasEnabled = false)
2560{
2561 float qScale;
2562 int32_t qOffset;
2563 switch (ArmnnType)
2564 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002565 case armnn::DataType::QAsymmS8:
Derek Lambertif90c56d2020-01-10 17:14:08 +00002566 case armnn::DataType::QAsymmU8:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002567 {
2568 qScale = 0.1f;
2569 qOffset = 128;
2570 break;
2571 }
Derek Lambertif90c56d2020-01-10 17:14:08 +00002572 case armnn::DataType::QSymmS16:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002573 {
2574 qScale = 0.1f;
2575 qOffset = 0;
2576 break;
2577 }
2578 case armnn::DataType::Float32:
2579 default:
2580 {
2581 qScale = 0.f;
2582 qOffset = 0;
2583 break;
2584 }
2585 }
2586
2587 inputTensorInfo.SetQuantizationScale(qScale);
2588 inputTensorInfo.SetQuantizationOffset(qOffset);
2589 kernelTensorInfo.SetQuantizationScale(qScale);
2590 kernelTensorInfo.SetQuantizationOffset(qOffset);
2591 outputTensorInfo.SetQuantizationScale(qScale);
2592 outputTensorInfo.SetQuantizationOffset(qOffset);
2593
Sadik Armagan483c8112021-06-01 09:24:52 +01002594 auto input = QuantizedVector<T>(inputNoQuantizedValues,
2595 inputTensorInfo.GetQuantizationScale(),
2596 inputTensorInfo.GetQuantizationOffset());
2597 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
2598 kernelTensorInfo.GetQuantizationScale(),
2599 kernelTensorInfo.GetQuantizationOffset());
2600 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
2601 outputTensorInfo.GetQuantizationScale(),
2602 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002603
2604 uint32_t padLeft = 0;
2605 uint32_t padTop = 0;
2606 uint32_t padRight = 0;
2607 uint32_t padBottom = 0;
2608 uint32_t strideX = 1;
2609 uint32_t strideY = 1;
2610
2611 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2612 workloadFactory,
2613 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002614 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002615 input,
2616 kernel,
2617 GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2618 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002619 inputTensorInfo.GetShape(),
2620 kernelTensorInfo.GetShape(),
2621 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002622 qScale,
2623 qOffset,
2624 layout,
2625 padLeft,
2626 padTop,
2627 padRight,
2628 padBottom,
2629 strideX,
2630 strideY,
2631 dilationX,
2632 dilationY);
2633}
2634
2635template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2636LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
2637 armnn::IWorkloadFactory& workloadFactory,
2638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002639 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002640 bool biasEnabled,
2641 const armnn::DataLayout layout)
2642{
2643 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
2644 std::vector<float> inputNoQuantizedValues =
2645 {
2646 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2647 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2648 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2649 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2650 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2651 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2652 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2653 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2654 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2655 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2656 };
2657
Jan Eilers53ef7952021-06-02 12:01:25 +01002658 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002659 std::vector<float> kernelNoQuantizedValues =
2660 {
2661 1, 2, 3,
2662 4, 5, 6,
2663 7, 8, 9
2664 };
2665
2666 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2667 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2668 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
2669 std::vector<float> outputExpectedNoQuantizedValues =
2670 {
2671 6., 5., 5., 5.,
2672 6., 5., 5., 5.,
2673 6., 5., 5., 5.,
2674 3., 2., 2., 2.
2675 };
2676
2677 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2678 workloadFactory,
2679 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002680 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002681 inputNoQuantizedValues,
2682 inputTensorInfo,
2683 kernelNoQuantizedValues,
2684 kernelTensorInfo,
2685 outputExpectedNoQuantizedValues,
2686 outputTensorInfo,
2687 3,
2688 3,
2689 layout,
2690 biasEnabled);
2691}
2692
2693template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2694LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
2695 armnn::IWorkloadFactory& workloadFactory,
2696 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002697 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002698 bool biasEnabled,
2699 const armnn::DataLayout layout)
2700{
2701 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
2702 std::vector<float> inputNoQuantizedValues =
2703 {
2704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2705 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2706 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2707 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2708 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2709 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2710 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2711 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2713 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2714
2715 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2716 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2717 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2718 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2719 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2720 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2721 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2722 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2723 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2724 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2725 };
2726
Jan Eilers53ef7952021-06-02 12:01:25 +01002727 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 2}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002728 std::vector<float> kernelNoQuantizedValues =
2729 {
2730 1, 2, 3,
2731 4, 5, 6,
2732 7, 8, 9,
2733
2734 1, 2, 3,
2735 4, 5, 6,
2736 7, 8, 9
2737 };
2738
2739 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2740 // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2741 armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
2742 std::vector<float> outputExpectedNoQuantizedValues =
2743 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002744 2, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 5, 3, 3, 3, 3,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002745
Jan Eilers53ef7952021-06-02 12:01:25 +01002746 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 6, 4, 4, 4
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002747 };
2748
2749 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2750 workloadFactory,
2751 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002752 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002753 inputNoQuantizedValues,
2754 inputTensorInfo,
2755 kernelNoQuantizedValues,
2756 kernelTensorInfo,
2757 outputExpectedNoQuantizedValues,
2758 outputTensorInfo,
2759 3,
2760 3,
2761 layout,
2762 biasEnabled);
2763}
2764
2765template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2766LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
2767 armnn::IWorkloadFactory& workloadFactory,
2768 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002769 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002770 bool biasEnabled,
2771 const armnn::DataLayout layout)
2772{
2773 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2774 std::vector<float> inputNoQuantizedValues =
2775 {
2776 10.0, 10.0, 10.0,
2777 10.0, 10.0, 10.0,
2778 10.0, 10.0, 10.0,
2779
2780 21.0, 22.0, 23.0,
2781 24.0, 25.0, 26.0,
2782 27.0, 28.0, 29.0
2783 };
2784
Jan Eilers53ef7952021-06-02 12:01:25 +01002785 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 8}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002786
2787 std::vector<float> kernelNoQuantizedValues =
2788 {
2789 0.25f, 0.25f,
2790 0.25f, 0.25f,
2791
2792 0.25f, 0.25f,
2793 0.25f, 0.25f,
2794
2795 0.0f , 0.0f,
2796 0.0f , 0.1f,
2797
2798 0.0f , 0.0f,
2799 0.0f , 0.1f,
2800
2801 0.2f , 0.0f,
2802 0.0f , 0.0f,
2803
2804 0.2f , 0.0f,
2805 0.0f , 0.0f,
2806
2807 0.0f , 0.3f,
2808 0.0f , 0.0f,
2809
2810 0.0f , 0.3f,
2811 0.0f , 0.0f
2812 };
2813
2814 armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType);
2815 std::vector<float> outputExpectedNoQuantizedValues =
2816 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002817 4.5f, 4.5f, 4.5f, 4.5f, 5.5f, 5.5f, 5.5f, 5.5f,
2818 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f,
2819 10.05f, 10.5f, 11.4f, 11.85f, 12.75f, 13.3f, 14.4f, 14.95f,
2820 5.25f, 5.5f, 6.0f, 6.25f, 7.45f, 7.8f, 8.5f, 8.85f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002821 };
2822
2823
2824 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2825 workloadFactory,
2826 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002827 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002828 inputNoQuantizedValues,
2829 inputTensorInfo,
2830 kernelNoQuantizedValues,
2831 kernelTensorInfo,
2832 outputExpectedNoQuantizedValues,
2833 outputTensorInfo,
2834 1,
2835 1,
2836 layout,
2837 biasEnabled);
2838}
2839
2840template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2841LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
2842 armnn::IWorkloadFactory& workloadFactory,
2843 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002844 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002845 bool biasEnabled,
2846 const armnn::DataLayout layout)
2847{
2848 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2849 std::vector<float> inputNoQuantizedValues =
2850 {
2851 10.0, 10.0, 10.0,
2852 10.0, 10.0, 10.0,
2853 10.0, 10.0, 10.0,
2854
2855 21.0, 22.0, 23.0,
2856 24.0, 25.0, 26.0,
2857 27.0, 28.0, 29.0
2858 };
2859
Jan Eilers53ef7952021-06-02 12:01:25 +01002860 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 4}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002861
2862 std::vector<float> kernelNoQuantizedValues =
2863 {
2864 0.25f, 0.25f,
2865 0.25f, 0.25f,
2866
2867 0.2f , 0.0f,
2868 0.0f , 0.0f,
2869
2870 0.0f , 0.0f,
2871 0.0f , 0.1f,
2872
2873 0.0f , 0.3f,
2874 0.0f , 0.0f
2875
2876 };
2877
2878 armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType);
2879 std::vector<float> outputExpectedNoQuantizedValues =
2880 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002881 4.5f, 4.5f, 4.5f, 4.5f,
2882 5.5f, 5.5f, 5.5f, 5.5f,
2883 5.25f, 5.5f, 6.0f, 6.25f,
2884 7.65f, 8.0f, 8.7f, 9.05f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002885 };
2886
2887
2888 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2889 workloadFactory,
2890 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002891 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002892 inputNoQuantizedValues,
2893 inputTensorInfo,
2894 kernelNoQuantizedValues,
2895 kernelTensorInfo,
2896 outputExpectedNoQuantizedValues,
2897 outputTensorInfo,
2898 1,
2899 1,
2900 layout,
2901 biasEnabled);
2902}
2903
2904template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
2905LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
2906 armnn::IWorkloadFactory& workloadFactory,
2907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
2908 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01002909 const armnn::ITensorHandleFactory& tensorHandleFactory,
2910 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002911 const armnnUtils::DataLayoutIndexed& layout)
2912{
2913 unsigned int inputHeight = 8;
2914 unsigned int inputWidth = 16;
2915 unsigned int inputChannels = 3;
2916 unsigned int inputNum = 5;
2917
2918 unsigned int kernelHeight = 3;
2919 unsigned int kernelWidth = 3;
2920 unsigned int channelMultiplier = 1;
2921
2922 unsigned int strideX = 2;
2923 unsigned int strideY = 3;
2924 unsigned int padX = 1;
2925 unsigned int padY = 1;
2926
2927 unsigned int outputNum = inputNum;
2928 unsigned int outputChannels = inputChannels * channelMultiplier;
2929 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
2930 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
2931
2932 armnn::TensorInfo inputTensorInfo;
2933 armnn::TensorInfo outputTensorInfo;
2934 armnn::TensorInfo kernelDesc;
2935 armnn::TensorInfo biasDesc;
2936
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002937 std::vector<unsigned int> inputShape;
2938 std::vector<unsigned int> outputShape;
Jan Eilers53ef7952021-06-02 12:01:25 +01002939 std::vector<unsigned int> kernelShape{ 1, kernelHeight, kernelWidth, outputChannels };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002940 std::vector<unsigned int> biasShape{ outputChannels };
2941 switch (layout.GetDataLayout())
2942 {
2943 case armnn::DataLayout::NCHW:
2944 inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
2945 outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
2946 break;
2947 case armnn::DataLayout ::NHWC:
2948 inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
2949 outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
2950 break;
2951 default:
2952 throw armnn::InvalidArgumentException("unknown data layout ["
2953 + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
2954 }
2955
2956 float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
2957 float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
2958 int32_t qOffset = 0;
2959
2960 inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
2961 outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
2962 kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
Sadik Armagan483c8112021-06-01 09:24:52 +01002963 biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002964
Sadik Armagan483c8112021-06-01 09:24:52 +01002965 auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
2966 auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
2967 auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002968
Sadik Armagan483c8112021-06-01 09:24:52 +01002969 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2970 std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01002971
2972 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2973 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2974
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002975 armnn::DepthwiseConvolution2dQueueDescriptor data;
2976 armnn::WorkloadInfo info;
James Conroy1f58f032021-04-27 17:13:27 +01002977 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2978 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002979
Sadik Armagan483c8112021-06-01 09:24:52 +01002980 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
2981 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002982
2983 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2984 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2985 data.m_Weight = &weightsTensor;
2986 data.m_Bias = &biasTensor;
2987 data.m_Parameters.m_StrideX = strideX;
2988 data.m_Parameters.m_StrideY = strideY;
2989 data.m_Parameters.m_PadLeft = padX;
2990 data.m_Parameters.m_PadRight = padX;
2991 data.m_Parameters.m_PadTop = padY;
2992 data.m_Parameters.m_PadBottom = padY;
2993 data.m_Parameters.m_BiasEnabled = true;
2994 data.m_Parameters.m_DataLayout = layout.GetDataLayout();
Keith Davisf500d6c2020-08-31 08:32:55 +01002995
2996 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2997 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2998
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002999 armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
3000 armnn::WorkloadInfo refInfo = info;
3001 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
3002 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3003
Teresa Charlin611c7fb2022-01-07 09:47:29 +00003004 std::unique_ptr<armnn::IWorkload> workload
3005 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
3006 std::unique_ptr<armnn::IWorkload> workloadRef
3007 = refWorkloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, refData, refInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003008
3009 outputHandleRef->Allocate();
3010 inputHandleRef->Allocate();
3011
3012 inputHandle->Allocate();
3013 outputHandle->Allocate();
3014
Sadik Armagan483c8112021-06-01 09:24:52 +01003015 CopyDataToITensorHandle(inputHandle.get(), input.data());
3016 CopyDataToITensorHandle(inputHandleRef.get(), input.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003017
3018 ExecuteWorkload(*workload, memoryManager);
3019
3020 workloadRef->PostAllocationConfigure();
3021 workloadRef->Execute();
3022
Sadik Armagan483c8112021-06-01 09:24:52 +01003023 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
3024 CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003025
Sadik Armagan483c8112021-06-01 09:24:52 +01003026 return LayerTestResult<T, 4>(actualOutput,
3027 expectedOutput,
3028 outputHandle->GetShape(),
3029 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003030}
3031
3032//
3033// Explicit template specializations
3034//
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003035template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3036Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3037 armnn::IWorkloadFactory&,
3038 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003039 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003040 bool,
3041 armnn::DataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003042
3043template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3044Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3045 armnn::IWorkloadFactory&,
3046 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003047 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003048 bool,
3049 armnn::DataLayout);
3050
Sadik Armagan303980c2020-04-17 12:45:14 +01003051template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3052Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3053 armnn::IWorkloadFactory&,
3054 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003055 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003056 bool,
3057 armnn::DataLayout);
3058
Derek Lambertif90c56d2020-01-10 17:14:08 +00003059template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3060Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003061 armnn::IWorkloadFactory&,
3062 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003063 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003064 bool,
3065 armnn::DataLayout);
3066
Derek Lambertif90c56d2020-01-10 17:14:08 +00003067template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3068Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003069 armnn::IWorkloadFactory&,
3070 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003071 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003072 bool,
3073 armnn::DataLayout);
3074
3075template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3076Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3077 armnn::IWorkloadFactory&,
3078 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003079 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003080 bool,
3081 armnn::DataLayout);
3082
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003083template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3084Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3085 armnn::IWorkloadFactory&,
3086 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003087 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003088 bool,
3089 armnn::DataLayout);
3090
Sadik Armagan303980c2020-04-17 12:45:14 +01003091template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3092Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3093 armnn::IWorkloadFactory&,
3094 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003095 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003096 bool,
3097 armnn::DataLayout);
3098
Derek Lambertif90c56d2020-01-10 17:14:08 +00003099template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3100Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003101 armnn::IWorkloadFactory&,
3102 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003103 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003104 bool,
3105 armnn::DataLayout);
3106
Derek Lambertif90c56d2020-01-10 17:14:08 +00003107template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3108Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003109 armnn::IWorkloadFactory&,
3110 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003111 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003112 bool,
3113 armnn::DataLayout);
3114
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003115template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3116Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3117 armnn::IWorkloadFactory &workloadFactory,
3118 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003119 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003120 bool biasEnabled,
3121 const armnn::DataLayout layout);
3122
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003123template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3124Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3125 armnn::IWorkloadFactory &workloadFactory,
3126 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003127 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003128 bool biasEnabled,
3129 const armnn::DataLayout layout);
3130
Sadik Armagan303980c2020-04-17 12:45:14 +01003131template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3132Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3133 armnn::IWorkloadFactory &workloadFactory,
3134 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003135 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armagan303980c2020-04-17 12:45:14 +01003136 bool biasEnabled,
3137 const armnn::DataLayout layout);
3138
Derek Lambertif90c56d2020-01-10 17:14:08 +00003139template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3140Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003141 armnn::IWorkloadFactory &workloadFactory,
3142 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003143 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003144 bool biasEnabled,
3145 const armnn::DataLayout layout);
3146
Derek Lambertif90c56d2020-01-10 17:14:08 +00003147template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3148Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003149 armnn::IWorkloadFactory &workloadFactory,
3150 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003151 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003152 bool biasEnabled,
3153 const armnn::DataLayout layout);
3154
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003155template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3156DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3157 armnn::IWorkloadFactory&,
3158 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003159 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003160 bool,
3161 armnn::DataLayout);
3162
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003163template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3164DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3165 armnn::IWorkloadFactory&,
3166 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003167 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003168 bool,
3169 armnn::DataLayout);
3170
Sadik Armagan303980c2020-04-17 12:45:14 +01003171template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3172DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3173 armnn::IWorkloadFactory&,
3174 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003175 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003176 bool,
3177 armnn::DataLayout);
3178
Derek Lambertif90c56d2020-01-10 17:14:08 +00003179template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3180DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003181 armnn::IWorkloadFactory&,
3182 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003183 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003184 bool,
3185 armnn::DataLayout);
3186
Derek Lambertif90c56d2020-01-10 17:14:08 +00003187template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3188DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003189 armnn::IWorkloadFactory&,
3190 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003191 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003192 bool,
3193 armnn::DataLayout);
3194
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003195template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3196DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3197 armnn::IWorkloadFactory&,
3198 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003199 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003200 bool,
3201 armnn::DataLayout);
3202
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003203template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3204DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3205 armnn::IWorkloadFactory&,
3206 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003207 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003208 bool,
3209 armnn::DataLayout);
3210
Sadik Armagan303980c2020-04-17 12:45:14 +01003211template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3212DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3213 armnn::IWorkloadFactory&,
3214 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003215 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003216 bool,
3217 armnn::DataLayout);
3218
Derek Lambertif90c56d2020-01-10 17:14:08 +00003219template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3220DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003221 armnn::IWorkloadFactory&,
3222 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003223 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003224 bool,
3225 armnn::DataLayout);
3226
Derek Lambertif90c56d2020-01-10 17:14:08 +00003227template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3228DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003229 armnn::IWorkloadFactory&,
3230 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003231 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003232 bool,
3233 armnn::DataLayout);
3234
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003235template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3236DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3237 armnn::IWorkloadFactory &workloadFactory,
3238 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003239 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003240 bool biasEnabled,
3241 const armnn::DataLayout layout);
3242
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003243template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3244DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3245 armnn::IWorkloadFactory &workloadFactory,
3246 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003247 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003248 bool biasEnabled,
3249 const armnn::DataLayout layout);
3250
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003251template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3252DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3253 armnn::IWorkloadFactory &workloadFactory,
3254 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003255 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003256 bool biasEnabled,
3257 const armnn::DataLayout layout);
3258
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003259template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3260DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3261 armnn::IWorkloadFactory &workloadFactory,
3262 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003263 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003264 bool biasEnabled,
3265 const armnn::DataLayout layout);
3266
3267//
3268// Implementation functions
3269//
3270
3271LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
3272 armnn::IWorkloadFactory& workloadFactory,
3273 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003274 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003275 bool biasEnabled,
3276 const armnn::DataLayout layout)
3277{
3278 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003279 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003280}
3281
3282LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
3283 armnn::IWorkloadFactory& workloadFactory,
3284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003285 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003286 bool biasEnabled,
3287 const armnn::DataLayout layout)
3288{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003289 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003290 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003291}
3292
3293LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
3294 armnn::IWorkloadFactory& workloadFactory,
3295 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003296 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003297 bool biasEnabled,
3298 const armnn::DataLayout layout)
3299{
3300 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003301 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003302}
3303
3304LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
3305 armnn::IWorkloadFactory& workloadFactory,
3306 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003307 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003308 bool biasEnabled)
3309{
3310 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3311 workloadFactory,
3312 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003313 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003314 0.f,
3315 0,
3316 biasEnabled,
3317 armnn::DataLayout::NHWC);
3318}
3319
3320LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
3321 armnn::IWorkloadFactory& workloadFactory,
3322 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003323 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003324 bool biasEnabled,
3325 const armnn::DataLayout layout)
3326{
3327 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3328 workloadFactory,
3329 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003330 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003331 0.f,
3332 0,
3333 biasEnabled,
3334 layout);
3335}
3336
3337LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
3338 armnn::IWorkloadFactory& workloadFactory,
3339 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003340 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003341 bool biasEnabled,
3342 const armnn::DataLayout layout)
3343{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003344 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003345 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003346}
3347
3348LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
3349 armnn::IWorkloadFactory& workloadFactory,
3350 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003351 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003352 bool biasEnabled,
3353 const armnn::DataLayout layout)
3354{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003355 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003356 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003357}
3358
3359LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
3360 armnn::IWorkloadFactory& workloadFactory,
3361 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003362 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003363 bool biasEnabled,
3364 const armnn::DataLayout layout)
3365{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003366 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003367 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003368}
3369
3370LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
3371 armnn::IWorkloadFactory& workloadFactory,
3372 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003373 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003374 armnn::DataLayout layout)
3375{
3376 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003377 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003378}
3379
3380LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
3381 armnn::IWorkloadFactory& workloadFactory,
3382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003383 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003384 armnn::DataLayout layout)
3385{
3386 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
3387 <armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003388 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003389}
3390
3391LayerTestResult<float, 4> Convolution1dTest(
3392 armnn::IWorkloadFactory& workloadFactory,
3393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003394 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003395 bool biasEnabled)
3396{
3397 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003398 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003399}
3400
3401LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
3402 armnn::IWorkloadFactory& workloadFactory,
3403 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003404 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003405 bool biasEnabled)
3406{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003407 return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003408 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003409}
3410
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003411LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
3412 armnn::IWorkloadFactory& workloadFactory,
3413 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003414 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003415 const armnn::DataLayout layout)
3416{
3417 using namespace armnn;
3418
Derek Lambertif90c56d2020-01-10 17:14:08 +00003419 const DataType inputType = DataType::QAsymmU8;
Derek Lambertid466a542020-01-22 15:37:29 +00003420 const DataType kernelType = DataType::QSymmS8;
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003421 const DataType biasType = DataType::Signed32;
3422
3423 TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
3424 TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
3425
3426 const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
3427 constexpr unsigned int quantDimension = 0;
3428
3429 TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
3430
3431 const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
3432 TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
3433
3434 std::vector<uint8_t> inputData =
3435 {
3436 138, 108, 138, 108, 138, 108
3437 };
3438
3439 std::vector<int8_t> kernelData =
3440 {
3441 1, 2, 1, 2, 1, 2
3442 };
3443
3444 std::vector<int32_t> biasData =
3445 {
3446 4, 4, 4
3447 };
3448
3449 std::vector<uint8_t> expectedOutputData =
3450 {
3451 121, 118, 115, 121, 118, 115, 121, 118, 115
3452 };
3453
3454 if (layout == DataLayout::NCHW)
3455 {
3456 PermuteTensorNhwcToNchw(inputInfo, inputData);
3457 PermuteTensorNhwcToNchw(kernelInfo, kernelData);
3458 PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3459 }
3460
Sadik Armagan483c8112021-06-01 09:24:52 +01003461 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3462
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003463 Convolution2dDescriptor descriptor;
3464 descriptor.m_StrideX = 1;
3465 descriptor.m_StrideY = 1;
3466 descriptor.m_PadLeft = 0;
3467 descriptor.m_PadRight = 0;
3468 descriptor.m_PadTop = 0;
3469 descriptor.m_PadBottom = 0;
3470 descriptor.m_BiasEnabled = true;
3471 descriptor.m_DataLayout = layout;
3472
Keith Davisf500d6c2020-08-31 08:32:55 +01003473 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3474 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
3475
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003476 WorkloadInfo workloadInfo;
James Conroy1f58f032021-04-27 17:13:27 +01003477 ScopedTensorHandle weightTensor(kernelInfo);
3478 ScopedTensorHandle biasTensor(biasInfo);
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003479
3480 AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3481 AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3482
3483 Convolution2dQueueDescriptor queueDescriptor;
3484 queueDescriptor.m_Parameters = descriptor;
3485 queueDescriptor.m_Weight = &weightTensor;
3486 queueDescriptor.m_Bias = &biasTensor;
3487
3488 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3489 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3490
Teresa Charlin611c7fb2022-01-07 09:47:29 +00003491 std::unique_ptr<IWorkload> workload= workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
3492 queueDescriptor,
3493 workloadInfo);
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003494 inputHandle->Allocate();
3495 outputHandle->Allocate();
3496
3497 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3498
3499 ExecuteWorkload(*workload, memoryManager);
3500
Sadik Armagan483c8112021-06-01 09:24:52 +01003501 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003502
Sadik Armagan483c8112021-06-01 09:24:52 +01003503 return LayerTestResult<uint8_t, 4>(actualOutput,
3504 expectedOutputData,
3505 outputHandle->GetShape(),
3506 outputInfo.GetShape());
Aron Virginas-Tar5edc8812019-11-05 18:00:21 +00003507}
3508
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003509LayerTestResult<float,4> CompareConvolution2dTest(
3510 armnn::IWorkloadFactory& workloadFactory,
3511 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003512 armnn::IWorkloadFactory& refWorkloadFactory,
3513 const armnn::ITensorHandleFactory& tensorHandleFactory,
3514 const armnn::ITensorHandleFactory& refTensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003515{
3516 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003517 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003518}
3519
3520LayerTestResult<float, 4> DepthwiseConvolution2dTest(
3521 armnn::IWorkloadFactory& workloadFactory,
3522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003523 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003524 bool biasEnabled,
3525 const armnn::DataLayout layout)
3526{
3527 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003528 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003529}
3530
3531LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
3532 armnn::IWorkloadFactory& workloadFactory,
3533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003534 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003535 bool biasEnabled)
3536{
3537 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003538 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003539}
3540
3541LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
3542 armnn::IWorkloadFactory& workloadFactory,
3543 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003544 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003545 bool biasEnabled,
3546 const armnn::DataLayout layout)
3547{
3548 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003549 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003550}
3551
3552LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
3553 armnn::IWorkloadFactory& workloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3555 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003556{
3557 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
Sadik Armagan483c8112021-06-01 09:24:52 +01003558 std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003559
3560 std::vector<float> kernelData;
3561 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3562 for (unsigned int i = 0; i < 64; ++i)
3563 {
3564 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3565 }
3566 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003567
Jan Eilers53ef7952021-06-02 12:01:25 +01003568 // permute from [O,1,H,W] --> [1,H,W,O]
3569 armnn::PermutationVector permutationVector {3,0,1,2};
3570 kernelTensorInfo = armnnUtils::Permuted(kernelTensorInfo, permutationVector);
3571 std::vector<float> kernelPermuted(kernelTensorInfo.GetNumElements());
3572 armnnUtils::Permute(kernelTensorInfo.GetShape(), permutationVector,
3573 kernelData.data(), kernelPermuted.data(),
3574 GetDataTypeSize(kernelTensorInfo.GetDataType()));
3575
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003576 std::vector<float> expectedOutputData(64, 0.f);
3577 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003578
3579 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3580 workloadFactory,
3581 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003582 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003583 input,
Jan Eilers53ef7952021-06-02 12:01:25 +01003584 kernelPermuted,
Sadik Armagan483c8112021-06-01 09:24:52 +01003585 std::vector<float>(),
3586 expectedOutputData,
3587 inputTensorInfo.GetShape(),
3588 kernelTensorInfo.GetShape(),
3589 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003590 0.f,
3591 0,
3592 armnn::DataLayout::NCHW);
3593}
3594
3595LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
3596 armnn::IWorkloadFactory& workloadFactory,
3597 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003598 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003599 bool biasEnabled,
3600 const armnn::DataLayout layout)
3601{
3602 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003603 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003604}
3605
3606LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
3607 armnn::IWorkloadFactory& workloadFactory,
3608 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003609 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003610 bool biasEnabled,
3611 const armnn::DataLayout layout)
3612{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003613 return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003614 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003615}
3616
3617LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
3618 armnn::IWorkloadFactory& workloadFactory,
3619 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003620 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003621 bool biasEnabled,
3622 const armnn::DataLayout layout)
3623{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003624 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003625 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003626}
3627
3628LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
3629 armnn::IWorkloadFactory& workloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003630 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3631 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003632{
3633 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3634 workloadFactory,
3635 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003636 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003637 0.f,
3638 0,
3639 false);
3640}
3641
3642LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
3643 armnn::IWorkloadFactory& workloadFactory,
3644 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003645 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003646 bool biasEnabled,
3647 const armnn::DataLayout layout)
3648{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003649 return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003650 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003651}
3652
3653LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
3654 armnn::IWorkloadFactory& workloadFactory,
3655 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003656 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003657 bool biasEnabled,
3658 const armnn::DataLayout layout)
3659{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003660 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003661 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003662}
3663
Teresa Charlind8df0262019-11-11 12:28:15 +00003664LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
3665 armnn::IWorkloadFactory& workloadFactory,
3666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003667 const armnn::ITensorHandleFactory& tensorHandleFactory,
Teresa Charlind8df0262019-11-11 12:28:15 +00003668 const armnn::DataLayout layout)
3669{
3670 using namespace armnn;
3671
Derek Lambertif90c56d2020-01-10 17:14:08 +00003672 const DataType inputType = DataType::QAsymmU8;
Derek Lambertid466a542020-01-22 15:37:29 +00003673 const DataType kernelType = DataType::QSymmS8;
Teresa Charlind8df0262019-11-11 12:28:15 +00003674 const DataType biasType = DataType::Signed32;
3675
3676 TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128); // N H W C
3677 TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128); // N H W C
3678
3679 const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
Jan Eilers53ef7952021-06-02 12:01:25 +01003680 const unsigned int quantDimension = 3;
3681 TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension); // [1, H, W, I*M]
Teresa Charlind8df0262019-11-11 12:28:15 +00003682
3683 const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3684 constexpr unsigned int biasQuantDimension = 0;
3685 TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3686
3687 std::vector<uint8_t> inputData =
3688 {
3689 129, 130,
3690 129, 130,
3691 129, 130,
3692 129, 130,
3693 129, 130,
3694 129, 130,
3695 129, 130,
3696 129, 130,
3697 129, 130
3698 };
3699
3700 std::vector<int8_t> kernelData =
3701 {
3702 1, 1, 1, 1,
3703 1, 1, 1, 1,
3704 1, 1, 1, 1,
3705 1, 1, 1, 1
3706 };
3707
3708 std::vector<int32_t> biasData =
3709 {
3710 4, 4, 4, 4
3711 };
3712
3713 std::vector<uint8_t> expectedOutputData =
3714 {
3715 132, 130, 134, 131,
3716 132, 130, 134, 131,
3717 132, 130, 134, 131,
3718 132, 130, 134, 131
3719 };
3720
3721 if (layout == DataLayout::NCHW)
3722 {
3723 PermuteTensorNhwcToNchw(inputInfo, inputData);
3724 PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3725 }
3726
Sadik Armagan483c8112021-06-01 09:24:52 +01003727 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3728
Teresa Charlind8df0262019-11-11 12:28:15 +00003729 DepthwiseConvolution2dDescriptor descriptor;
3730 descriptor.m_StrideX = 1;
3731 descriptor.m_StrideY = 1;
3732 descriptor.m_PadLeft = 0;
3733 descriptor.m_PadRight = 0;
3734 descriptor.m_PadTop = 0;
3735 descriptor.m_PadBottom = 0;
3736 descriptor.m_DilationX = 1;
3737 descriptor.m_DilationY = 1;
3738 descriptor.m_BiasEnabled = true;
3739 descriptor.m_DataLayout = layout;
3740
Keith Davisf500d6c2020-08-31 08:32:55 +01003741 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3742 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003743
3744 WorkloadInfo workloadInfo;
James Conroy1f58f032021-04-27 17:13:27 +01003745 ScopedTensorHandle weightTensor(kernelInfo);
3746 ScopedTensorHandle biasTensor(biasInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003747
3748 AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3749 AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3750
3751 DepthwiseConvolution2dQueueDescriptor queueDescriptor;
3752 queueDescriptor.m_Parameters = descriptor;
3753 queueDescriptor.m_Weight = &weightTensor;
3754 queueDescriptor.m_Bias = &biasTensor;
3755
3756 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3757 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3758
Teresa Charlin611c7fb2022-01-07 09:47:29 +00003759 std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
3760 queueDescriptor,
3761 workloadInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003762 inputHandle->Allocate();
3763 outputHandle->Allocate();
3764
3765 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3766
3767 ExecuteWorkload(*workload, memoryManager);
3768
3769 LayerTestResult<uint8_t, 4> ret(outputInfo);
3770
Sadik Armagan483c8112021-06-01 09:24:52 +01003771 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Teresa Charlind8df0262019-11-11 12:28:15 +00003772
Sadik Armagan483c8112021-06-01 09:24:52 +01003773 return LayerTestResult<uint8_t, 4>(actualOutput,
3774 expectedOutputData,
3775 outputHandle->GetShape(),
3776 outputInfo.GetShape());
Teresa Charlind8df0262019-11-11 12:28:15 +00003777}
3778
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003779LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
3780 armnn::IWorkloadFactory& workloadFactory,
3781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3782 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003783 const armnn::ITensorHandleFactory& tensorHandleFactory,
3784 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003785 const armnn::DataLayout layout)
3786{
3787 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003788 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003789}
3790
3791LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
3792 armnn::IWorkloadFactory& workloadFactory,
3793 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3794 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003795 const armnn::ITensorHandleFactory& tensorHandleFactory,
3796 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003797 const armnn::DataLayout layout)
3798{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003799 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003800 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003801}