blob: 69a04df76964d2072c9e214f3bd294471a578626 [file] [log] [blame]
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001//
Mike Kellyec67a0f2022-11-25 13:55:24 +00002// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "Conv2dTestImpl.hpp"
7
Colm Donelanc42a9872022-02-02 16:35:09 +00008#include <armnnUtils/QuantizeHelper.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +00009#include <armnnUtils/TensorUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010010
Jan Eilers8eb25602020-03-09 12:13:48 +000011#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan171214c2020-09-09 09:07:37 +010012#include <armnn/utility/NumericCast.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000013#include <armnnUtils/DataLayoutIndexed.hpp>
14#include <armnnUtils/Permute.hpp>
15
Colm Donelan0c479742021-12-10 12:43:54 +000016#include <armnn/backends/TensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010017
Sadik Armagana097d2a2021-11-24 15:47:28 +000018#include <armnnTestUtils/DataLayoutUtils.hpp>
19#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000020#include <armnnTestUtils/WorkloadTestUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010021
Colm Donelanc42a9872022-02-02 16:35:09 +000022#include <armnnTestUtils/TensorHelpers.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010023
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010024#include <string>
25
26//
27// Static data
28//
29
30// 2-channel bias used by a number of Conv2d tests.
31static std::vector<float> Bias2({0, 2});
32
33static std::vector<float> Bias4({1, 2, 3, 4});
34
35static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4});
36
37// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
38static std::vector<float> ConvInput3x8x16({
39 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
40 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
41 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
42 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
43 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
44 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
45 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
46 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
47 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
48 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
49 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
50 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
51 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
52 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
53 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
54 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
55 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
56 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
57 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
58 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
59 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
60 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
61 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
62 -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
63});
64
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010065using namespace armnnUtils;
66
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010067//
68// Helper templates
69//
70
71// Helper template that returns either Bias2 or an empty vector depending on whether bias is enabled.
72template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010073std::vector<T> GetBias2(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010074{
75 if(biasEnabled)
76 {
Sadik Armagan483c8112021-06-01 09:24:52 +010077 return QuantizedVector<T>(Bias2, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010078 }
79 else
80 {
Sadik Armagan483c8112021-06-01 09:24:52 +010081 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010082 }
83}
84
85// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
86template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010087std::vector<T> GetBias4(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010088{
89 if(biasEnabled)
90 {
Sadik Armagan483c8112021-06-01 09:24:52 +010091 return QuantizedVector<T>(Bias4, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010092 }
93 else
94 {
Sadik Armagan483c8112021-06-01 09:24:52 +010095 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010096 }
97}
98
99// Helper template that returns either Bias8 or an empty vector depending on whether bias is enabled.
100template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100101std::vector<T> GetBias8(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100102{
103 if(biasEnabled)
104 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100105 return QuantizedVector<T>(Bias8, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100106 }
107 else
108 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100109 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100110 }
111}
112
113// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
114template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100115std::vector<T> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100116{
117 const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
118 const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
119 const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex];
120
121 switch (outputChannels)
122 {
123 case 2:
124 default:
125 {
126 return GetBias2<ArmnnType>(biasEnabled, qScale);
127 }
128 case 4:
129 {
130 return GetBias4<ArmnnType>(biasEnabled, qScale);
131 }
132 case 8:
133 {
134 return GetBias8<ArmnnType>(biasEnabled, qScale);
135 }
136 }
137}
138
139//
140// Implementation templates
141//
142
// Mapping from input type to bias type for fully connected layers.
// float => float, uint8_t => int32_t
// The primary template is deliberately left undefined so that instantiating it
// for an unsupported input type is a compile-time error.
template<typename T>
struct FullyConnectedBiasTypeForInputType;

// Float inputs use a float bias.
template<>
struct FullyConnectedBiasTypeForInputType<float>
{
    using Type = float;
};

// Quantized 8-bit inputs use a 32-bit integer bias.
template<>
struct FullyConnectedBiasTypeForInputType<uint8_t>
{
    using Type = int32_t;
};
159
160// Modifies a std::vector in-place using a specified bias.
161template<typename T, typename B>
162void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
163 const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
164{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100165 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100166 "Invalid type and parameter combination.");
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100167 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100168 "Invalid type and parameter combination.");
169
170 // Note we need to dequantize and re-quantize the image value and the bias.
171 for (uint32_t i = 0; i < bias.size(); ++i)
172 {
173 float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
174 for (uint32_t y = 0; y < h; ++y)
175 {
176 for (uint32_t x = 0; x < w; ++x)
177 {
178 uint32_t offset = (i * h + y) * w + x;
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100179 ARMNN_ASSERT(offset < v.size());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100180 T& outRef = v[offset];
181 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
182 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
183 }
184 }
185 }
186}
187
188//
189// Convolution2d implementations
190//
191
// Builds and runs a Convolution2d workload against caller-supplied reference data.
// Input/kernel/expected-output data and shapes are supplied in NCHW order; when
// layout is NHWC everything (input, kernel, expected output) is permuted first.
// The single-batch reference data is duplicated into two identical batches before
// execution. If a bias is supplied it is applied to the expected output on the
// host via ApplyBias and fed to the workload as a third (constant) input.
// Returns a LayerTestResult comparing the workload's output with the expectation.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,
    const std::vector<T>& originalKernel,
    const std::vector<B>& bias,
    const std::vector<T>& originalOutputExpected,
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    armnn::IgnoreUnused(memoryManager);

    // All shape parameters are given in NCHW order regardless of 'layout'.
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);

    // An empty bias vector means the bias input is disabled entirely.
    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
            armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
            armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc =
            armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
    // Weights and bias are constant inputs to the workload.
    kernelDesc.SetConstant(true);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
    biasDesc.SetConstant(true);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is the product of input and kernel scales (both qScale here).
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct input data - two batches of the same input image.
    std::vector<T> inputImage;
    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    // Single-batch expected output image, copied from the caller's data.
    std::vector<T> outputImage;
    outputImage.assign(originalOutputExpected.data(),
                       originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output image if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    // Data will be copied from outputHandle
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data - two identical images.
    std::vector<T> expectedOutput;
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(expectedOutput.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T));
        expectedOutput = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Permute the kernel if necessary
    std::vector<T> kernel = originalKernel;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
    }

    // Workload inputs: 0 = input tensor, 1 = weights, (2 = bias when enabled).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), bias.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernel.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
372
// Builds and runs a Convolution2d workload with data already supplied in the
// requested layout (shapes are interpreted as NHWC). Unlike
// SimpleConvolution2dTestImpl, no permutation or batch duplication is done,
// and ApplyBias is NOT called: the caller must pass outputExpected with any
// bias already folded in. The output may use a different data type (OutType),
// allowing e.g. mixed input/output type tests.
// NOTE(review): qScale/qOffset are currently unused (see IgnoreUnused below);
// no quantization parameters are set on the tensor infos here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>,
         armnn::DataType OutType = ArmnnType, typename O = armnn::ResolveType<OutType>>
LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& kernel,
    const std::vector<B>& bias,
    const std::vector<O>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 1,
    uint32_t padTop = 1,
    uint32_t padRight = 1,
    uint32_t padBottom = 1,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    armnn::IgnoreUnused(qScale, qOffset);

    // Shapes are interpreted as NHWC (channels in dimension 3).
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[2]);

    unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);

    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);

    // An empty bias vector means the bias input is disabled entirely.
    bool biasEnabled = bias.size() > 0;

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, OutType);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
    // Weights and bias are constant inputs to the workload.
    kernelDesc.SetConstant(true);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
    biasDesc.SetConstant(true);

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);

    // Construct the expected output data. Note: no bias is applied here - the
    // caller's outputExpected is used as-is.
    std::vector<O> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    // Data will be copied from outputHandle after execution.
    std::vector<O> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    armnn::Convolution2dQueueDescriptor data;

    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = dataLayout;

    // Workload inputs: 0 = input tensor, 1 = weights, (2 = bias when enabled).
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), bias.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernel.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<O, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
485
// Tests a 1D convolution by running a Convolution2d workload whose final
// (width) dimension is fixed to 1; the 1D axis is mapped onto the height.
// All input/kernel/expected-output data is hard-coded below; qScale/qOffset
// drive the quantization parameters when T is a quantized type.
// When biasEnabled is true the hard-coded bias is applied to the expected
// output on the host via ApplyBias and supplied to the workload as a
// constant third input.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    using B = armnn::ResolveType<ArmnnBType>;
    // Until we have a specialist 1D convolution layer, we can fake one using
    // 2D convolution with the final dimension set to 1.
    // I don't anticipate this being particularly slow, given that convolution is implemented
    // as a matrix multiplication, at which point dimension doesn't matter.

    unsigned int batchSize = 1;
    unsigned int inputChannels = 2;
    unsigned int outputChannels = 3;
    unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
    unsigned int kernelSize = 3;
    unsigned int padSize = 2;
    unsigned int stride = 1;
    unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.

    // NCHW tensors with W == 1 so the convolution effectively runs along H.
    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
    // Weights and bias are constant inputs to the workload.
    kernelInfo.SetConstant(true);
    armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
    biasInfo.SetConstant(true);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        kernelInfo.SetQuantizationScale(qScale);
        kernelInfo.SetQuantizationOffset(qOffset);
        // Bias scale is the product of the input and kernel scales.
        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
        biasInfo.SetQuantizationOffset(0);
    }

    // Input: 2 channels of 5 elements each.
    std::vector<T> inputData = QuantizedVector<T>(
        {
            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
        },
        inputInfo.GetQuantizationScale(),
        inputInfo.GetQuantizationOffset());

    // Kernel: 3 output channels x 2 input channels x 3 taps.
    std::vector<T> kernelData = QuantizedVector<T>(
        {
            1.0f, 0.0f, 0.0f,
            0.0f, 2.0f, -1.5f,

            0.0f, 0.0f, 0.0f,
            0.2f, 0.2f, 0.2f,

            0.5f, 0.0f, 0.5f,
            0.0f, -1.0f, 0.0f
        },
        kernelInfo.GetQuantizationScale(),
        kernelInfo.GetQuantizationOffset());

    // One bias value per output channel.
    std::vector<B> biasData =
        QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());

    // Expected output: 3 channels of 7 elements, written as the per-tap sums
    // so the arithmetic can be checked by eye.
    std::vector<T> outputData = QuantizedVector<T>(
        {
            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
        },
        outputInfo.GetQuantizationScale(),
        outputInfo.GetQuantizationOffset());

    // Data will be copied from outputHandle after execution.
    std::vector<T> actualOutput(outputInfo.GetNumElements());

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
                  biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
                  1, outputSize);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Workload inputs: 0 = input tensor, 1 = weights, (2 = bias when enabled).
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());

    // Stride/padding act on the height axis only; width (the dummy axis) is untouched.
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = stride;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = padSize;
    data.m_Parameters.m_PadBottom = padSize;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
        AddInputToWorkload(data, info, biasInfo, biasHandle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), biasData.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernelData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputInfo.GetShape());
}
625
626template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
627LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
628 armnn::IWorkloadFactory& workloadFactory,
629 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100630 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100631 float qScale,
632 int32_t qOffset,
633 bool biasEnabled,
634 armnn::DataLayout dataLayout)
635{
Jan Eilers8eb25602020-03-09 12:13:48 +0000636 armnn::IgnoreUnused(biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100637 // Use common single-batch 5x5 image.
638
Sadik Armagan483c8112021-06-01 09:24:52 +0100639 armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType);
640 std::vector<T> input =
641 {
642 1, 5, 2, 3,
643 8, 7, 3, 6,
644 3, 3, 9, 1
645 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100646
647 // Use a 2-element batch of 3-channel 3x3 kernels.
Sadik Armagan483c8112021-06-01 09:24:52 +0100648 armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
649 std::vector<T> kernel =
650 {
651 4, 5, 6,
652 0, 0, 0,
653 3, 2, 1
654 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100655
656 // Expected output is 1 batch of a 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100657 armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100658 const std::vector<float> outputData =
Sadik Armagan483c8112021-06-01 09:24:52 +0100659 {
660 23, 41, 33, 21,
661 44, 65, 76, 52,
662 82, 85, 79, 42
663 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100664
665 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
666 workloadFactory,
667 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100668 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100669 input,
670 kernel,
Sadik Armagan483c8112021-06-01 09:24:52 +0100671 std::vector<T>(),
672 outputData,
673 inputDesc.GetShape(),
674 kernelDesc.GetShape(),
675 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100676 dataLayout,
677 qScale,
678 qOffset);
679}
680
681template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
682LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
683 armnn::IWorkloadFactory& workloadFactory,
684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100685 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100686 float qScale,
687 int32_t qOffset,
688 bool biasEnabled,
689 const armnn::DataLayout& dataLayout)
690{
Jan Eilers8eb25602020-03-09 12:13:48 +0000691 armnn::IgnoreUnused(biasEnabled);
Derek Lambertic374ff02019-12-10 21:57:35 +0000692
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100693 // Input is a single-batch, 1 channel, 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100694 armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType);
695 std::vector<T> input =
696 {
697 1, 5, 2, 3, 5,
698 8, 7, 3, 6, 3,
699 3, 3, 9, 1, 9,
700 4, 1, 8, 1, 3,
701 6, 8, 1, 9, 2
702 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100703
704 // Use a 3x3 kernel.
Sadik Armagan483c8112021-06-01 09:24:52 +0100705 armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
706 std::vector<T> kernel =
707 {
708 4, 5, 6,
709 0, 0, 0,
710 3, 2, 1
711 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100712
713 // Expected output is a single-batch, 1 channel, 3x3 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100714 armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType);
715 std::vector<T> outputData =
716 {
717 23, 33, 24,
718 91, 99, 48,
719 26, 50, 19
720 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100721
722 uint32_t padLeft = 1;
723 uint32_t padTop = 1;
724 uint32_t padRight = 1;
725 uint32_t padBottom = 1;
726 uint32_t strideX = 2;
727 uint32_t strideY = 2;
728
729 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
730 workloadFactory,
731 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100732 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100733 input,
734 kernel,
Sadik Armagan483c8112021-06-01 09:24:52 +0100735 std::vector<T>(),
736 outputData,
737 inputDesc.GetShape(),
738 kernelDesc.GetShape(),
739 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100740 dataLayout,
741 qScale,
742 qOffset,
743 padLeft,
744 padTop,
745 padRight,
746 padBottom,
747 strideX,
748 strideY);
749}
750
751template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
752LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
753 armnn::IWorkloadFactory& workloadFactory,
754 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100755 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100756 float qScale,
757 int32_t qOffset,
758 bool biasEnabled,
759 const armnn::DataLayout layout)
760{
761 // Use common single-batch 3-channel 16x8 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100762 armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
763 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100764
765 // Use a 2-element batch with 3-channel 3x5 kernels.
Sadik Armagan483c8112021-06-01 09:24:52 +0100766 armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType);
767 std::vector<T> kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100768 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100769 1, -1, 1,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100770 1, 1, 1,
771 1, 1, 1,
772 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100773
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100774 0, 0, 0,
775 0, 0, 0,
776 0, 0, 0,
777 0, 0, 0,
778 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100779
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100780 2, 2, 2,
781 2, 2, 2,
782 2, 2, 2,
783 2, 2, 2,
784 2, 2, 2,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100785
786
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100787 0, 0, 0,
788 0, 0, 0,
789 0, 0, 0,
790 0, 0, 0,
791 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100792
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100793 1, 1, 1,
794 1, 1, 1,
795 1, 1, 1,
796 1, 1, 1,
797 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100798
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100799 0, 0, 0,
800 0, 0, 0,
801 0, 0, 0,
802 0, 0, 0,
803 0, 0, 0
804 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100805 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100806
807 // Expected output is 2 batch elements of a 1-channel 14x4 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100808 armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType);
809 std::vector<T> expectedOutput = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100810 -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
811 -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
812 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
813 -23.5f, -23.5f, -23.5f,
814 -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
815 -23.5f, -23.5f, -23.5f,
816
817 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
818 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
819 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
820 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100821 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100822 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100823
824 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
825 workloadFactory,
826 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100827 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100828 input,
829 kernel,
830 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
831 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100832 inputDesc.GetShape(),
833 kernelDesc.GetShape(),
834 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100835 qScale,
836 qOffset,
837 layout);
838}
839
840template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
841 typename T = armnn::ResolveType<ArmnnType>>
842LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
843 armnn::IWorkloadFactory& workloadFactory,
844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100845 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100846 float qScale,
847 int32_t qOffset,
848 bool biasEnabled,
849 const armnn::DataLayout layout)
850{
851 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
852
853 // Use common single-batch 3-channel 16x8 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100854 armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
855 std::vector<unsigned int> inputShape = { 1, 3, 8, 16 };
856 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100857
858 // Use a 2-element batch of 3-channel 3x3 kernels.
Sadik Armagan483c8112021-06-01 09:24:52 +0100859 armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType);
860 std::vector<T> kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100861 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100862 1, -1, 1,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100863 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100864
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100865 0, 0, 0,
866 0, 0, 0,
867 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100868
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100869 2, 2, 2,
870 2, 2, 2,
871 2, 2, 2,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100872
873
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100874 0, 0, 0,
875 0, 0, 0,
876 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100877
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100878 1, 1, 1,
879 1, 1, 1,
880 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100881
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100882 0, 0, 0,
883 0, 0, 0,
884 0, 0, 0
885 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100886 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100887
888 // Expected output is 1 batch of a 2-channel 14x6 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100889 armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType);
890 std::vector<T> expectedOutput = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100891 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
892 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
893 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
894 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
895 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
896 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
897
898 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
899 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
900 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
901 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
902 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
903 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100904 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100905 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100906
907 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
908 workloadFactory,
909 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100910 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100911 input,
912 kernel,
913 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
914 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100915 inputDesc.GetShape(),
916 kernelDesc.GetShape(),
917 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100918 qScale,
919 qOffset,
920 layout);
921}
922
923template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
924 typename T = armnn::ResolveType<ArmnnType>>
925LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
926 armnn::IWorkloadFactory& workloadFactory,
927 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100928 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100929 const armnn::DataLayout layout,
930 float qScale,
931 int32_t qOffset)
932{
933 // Use a single-batch 1-channel 3x3 image as input.
Sadik Armagan483c8112021-06-01 09:24:52 +0100934 armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType);
935 std::vector<T> input =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100936 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100937 11,21,31,
938 12,22,32,
939 13,23,33
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100940 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100941 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100942
943 // Use 1 batch of a 1-channel 2x2 kernel.
Sadik Armagan483c8112021-06-01 09:24:52 +0100944 armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType);
945 std::vector<T> kernel =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100946 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100947 -11,-21,
948 -12,-22,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100949 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100950 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100951
952// Expected output is 1 batch of a 1-channel 6x8 image.
953// Manually calculated like this:
954//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
955//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
956//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
957//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
958//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
959//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
960//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
Sadik Armagan483c8112021-06-01 09:24:52 +0100961 armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType);
962 std::vector<T> expectedOutput =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100963 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100964 0, 0, 0, 0, 0, 0,
965 -242, -594, -934, -372, 0, 0,
966 -495, -1190, -1850, -725, 0, 0,
967 -538, -1256, -1916, -748, 0, 0,
968 -273, -626, -946, -363, 0, 0,
969 0, 0, 0, 0, 0, 0,
970 0, 0, 0, 0, 0, 0,
971 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100972 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100973 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100974
975 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
976 workloadFactory,
977 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100978 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100979 input,
980 kernel,
981 GetBias2<ArmnnBType>(false, qScale * qScale),
982 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100983 inputDesc.GetShape(),
984 kernelDesc.GetShape(),
985 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100986 qScale,
987 qOffset,
988 layout,
989 1, // Padding left.
990 2, // Padding top.
991 3, // Padding right.
992 4); // Padding bottom.
993}
994
995template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
996 typename T = armnn::ResolveType<ArmnnType>>
997LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
998 armnn::IWorkloadFactory& workloadFactory,
999 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001000 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001001 const armnn::DataLayout layout,
1002 float qScale,
1003 int32_t qOffset)
1004{
1005 // Use a single-batch 1-channel 5x5 image as input.
1006 armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01001007 std::vector<T> input =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001008 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001009 11,21,31,41,51,
1010 12,22,32,42,52,
1011 13,23,33,43,53,
1012 14,24,34,44,54,
1013 15,25,35,45,55,
Sadik Armagan483c8112021-06-01 09:24:52 +01001014 }, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001015
1016 // Use 1 batch of a 1-channel 4x4 kernel.
1017 armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01001018 std::vector<T> kernel =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001019 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001020 -11,-21,-31,-41,
1021 -12,-22,-32,-42,
1022 -13,-23,-33,-43,
1023 -14,-24,-34,-44,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001024 },
Sadik Armagan483c8112021-06-01 09:24:52 +01001025 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001026
1027 // Expected output is 1 batch of a 1-channel 5x5 image.
1028 armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01001029 std::vector<T> expectedOutput =
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001030 QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001031 -7140, -10580, -13940, -9300, -5230,
1032 -9590, -14120, -18520, -12290, -6860,
1033 -9980, -14560, -18960, -12560, -7000,
1034 -7518, -10904, -14144, -9318, -5152,
1035 -5032, -7256, -9376, -6142, -3368,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001036 },
Sadik Armagan483c8112021-06-01 09:24:52 +01001037 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001038
1039 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1040 workloadFactory,
1041 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001042 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001043 input,
1044 kernel,
1045 GetBias2<ArmnnBType>(false, qScale * qScale),
1046 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01001047 inputDesc.GetShape(),
1048 kernelDesc.GetShape(),
1049 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001050 qScale,
1051 qOffset,
1052 layout,
1053 1, // Padding left.
1054 1, // Padding top.
1055 2, // Padding right.
1056 2); // Padding bottom.
1057}
1058
1059template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1060LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
1061 armnn::IWorkloadFactory& workloadFactory,
1062 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001063 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001064 const std::vector<float>& inputNoQuantizedValues,
1065 armnn::TensorInfo& inputTensorInfo,
1066 const std::vector<float>& kernelNoQuantizedValues,
1067 armnn::TensorInfo& kernelTensorInfo,
1068 const std::vector<float>& outputExpectedNoQuantizedValues,
1069 armnn::TensorInfo& outputTensorInfo,
1070 uint32_t dilationX,
1071 uint32_t dilationY,
1072 armnn::DataLayout layout = armnn::DataLayout::NCHW,
1073 uint32_t padLeft = 0,
1074 uint32_t padTop = 0,
1075 uint32_t padRight = 0,
1076 uint32_t padBottom = 0,
1077 uint32_t strideX = 1,
1078 uint32_t strideY = 1,
1079 bool biasEnabled = false
1080)
1081{
1082 float qScale;
1083 int32_t qOffset;
1084 switch (ArmnnType)
1085 {
Derek Lambertif90c56d2020-01-10 17:14:08 +00001086 case armnn::DataType::QAsymmU8:
Sadik Armagan303980c2020-04-17 12:45:14 +01001087 case armnn::DataType::QAsymmS8:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001088 {
1089 qScale = 0.1f;
1090 qOffset = 128;
1091 break;
1092 }
Derek Lambertif90c56d2020-01-10 17:14:08 +00001093 case armnn::DataType::QSymmS16:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001094 {
1095 qScale = 0.1f;
1096 qOffset = 0;
1097 break;
1098 }
1099 case armnn::DataType::Float32:
1100 default:
1101 {
1102 qScale = 0.f;
1103 qOffset = 0;
1104 break;
1105 }
1106 }
1107
1108 inputTensorInfo.SetQuantizationScale(qScale);
1109 inputTensorInfo.SetQuantizationOffset(qOffset);
1110 kernelTensorInfo.SetQuantizationScale(qScale);
1111 kernelTensorInfo.SetQuantizationOffset(qOffset);
1112 outputTensorInfo.SetQuantizationScale(qScale);
1113 outputTensorInfo.SetQuantizationOffset(qOffset);
1114
Sadik Armagan483c8112021-06-01 09:24:52 +01001115 auto input = QuantizedVector<T>(inputNoQuantizedValues,
1116 inputTensorInfo.GetQuantizationScale(),
1117 inputTensorInfo.GetQuantizationOffset());
1118 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
1119 kernelTensorInfo.GetQuantizationScale(),
1120 kernelTensorInfo.GetQuantizationOffset());
1121 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
1122 outputTensorInfo.GetQuantizationScale(),
1123 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001124
1125 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1126 workloadFactory,
1127 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001128 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001129 input,
1130 kernel,
1131 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1132 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01001133 inputTensorInfo.GetShape(),
1134 kernelTensorInfo.GetShape(),
1135 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001136 qScale,
1137 qOffset,
1138 layout,
1139 padLeft,
1140 padTop,
1141 padRight,
1142 padBottom,
1143 strideX,
1144 strideY,
1145 dilationX,
1146 dilationY);
1147}
1148
1149template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1150LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
1151 armnn::IWorkloadFactory& workloadFactory,
1152 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001153 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001154 bool biasEnabled,
1155 const armnn::DataLayout layout)
1156{
Sadik Armagan483c8112021-06-01 09:24:52 +01001157 armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001158 std::vector<float> inputNoQuantizedValues =
1159 {
1160 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1161 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1162 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1163 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1164 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1165 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1166 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1167 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1168 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1169 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1170 };
1171
1172 armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1173 std::vector<float> kernelNoQuantizedValues =
1174 {
1175 1, 2, 3,
1176 4, 5, 6,
1177 7, 8, 9
1178 };
1179
1180 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1181 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1182 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1183 std::vector<float> outputExpectedNoQuantizedValues =
1184 {
1185 6., 5., 5., 5.,
1186 6., 5., 5., 5.,
1187 6., 5., 5., 5.,
1188 3., 2., 2., 2.
1189 };
1190
1191 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1192 workloadFactory,
1193 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001194 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001195 inputNoQuantizedValues,
1196 inputTensorInfo,
1197 kernelNoQuantizedValues,
1198 kernelTensorInfo,
1199 outputExpectedNoQuantizedValues,
1200 outputTensorInfo,
1201 3,
1202 3,
1203 layout,
1204 biasEnabled);
1205}
1206
1207template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1208LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
1209 armnn::IWorkloadFactory& workloadFactory,
1210 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001211 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001212 bool biasEnabled,
1213 const armnn::DataLayout layout)
1214{
Sadik Armagan483c8112021-06-01 09:24:52 +01001215 armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001216 std::vector<float> inputNoQuantizedValues =
1217 {
1218 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1219 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1220 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1221 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1222 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1223 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1224 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1225 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1226 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1227 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1228
1229 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1230 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1231 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1232 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1233 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1234 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1235 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1236 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1237 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1238 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1239 };
1240
Sadik Armagan483c8112021-06-01 09:24:52 +01001241 armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001242 std::vector<float> kernelNoQuantizedValues =
1243 {
1244 1, 2, 3,
1245 4, 5, 6,
1246 7, 8, 9,
1247
1248 1, 2, 3,
1249 4, 5, 6,
1250 7, 8, 9
1251 };
1252
1253 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1254 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
Sadik Armagan483c8112021-06-01 09:24:52 +01001255 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001256 std::vector<float> outputExpectedNoQuantizedValues =
1257 {
1258 12., 10., 10., 10.,
1259 12., 10., 10., 10.,
1260 12., 10., 10., 10.,
1261 6., 4., 4., 4.
1262 };
1263
1264 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1265 workloadFactory,
1266 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001267 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001268 inputNoQuantizedValues,
1269 inputTensorInfo,
1270 kernelNoQuantizedValues,
1271 kernelTensorInfo,
1272 outputExpectedNoQuantizedValues,
1273 outputTensorInfo,
1274 3,
1275 3,
1276 layout,
1277 biasEnabled);
1278}
1279
1280template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
1281LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
Sadik Armagan483c8112021-06-01 09:24:52 +01001282 armnn::IWorkloadFactory& workloadFactory,
1283 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001284 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001285 bool biasEnabled,
1286 const armnn::DataLayout layout)
1287{
Sadik Armagan483c8112021-06-01 09:24:52 +01001288 armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001289 std::vector<float> inputNoQuantizedValues =
1290 {
1291 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1292 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1293 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1294 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1295 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1296 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1297 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1298 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1299 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1300 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1301 };
1302
Sadik Armagan483c8112021-06-01 09:24:52 +01001303 armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001304 std::vector<float> kernelNoQuantizedValues =
1305 {
1306 1, 2,
1307 3, 4
1308 };
1309
1310 // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
1311 // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
1312 // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
1313 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1314 std::vector<float> outputExpectedNoQuantizedValues =
1315 {
1316 4, 7, 7, 3,
1317 6, 10, 10, 4,
1318 6, 10, 10, 4,
1319 2, 3, 3, 1
1320 };
1321 uint32_t padLeft = 1;
1322 uint32_t padTop = 1;
1323 uint32_t padRight = 1;
1324 uint32_t padBottom = 1;
1325
1326 return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1327 workloadFactory,
1328 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001329 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001330 inputNoQuantizedValues,
1331 inputTensorInfo,
1332 kernelNoQuantizedValues,
1333 kernelTensorInfo,
1334 outputExpectedNoQuantizedValues,
1335 outputTensorInfo,
1336 2,
1337 2,
1338 layout,
1339 padLeft,
1340 padTop,
1341 padRight,
1342 padBottom,
1343 3,
1344 3,
1345 biasEnabled
1346 );
1347}
1348
// Compares a Convolution2d workload run on the backend under test against the same
// workload run on a reference backend, using seeded (hence deterministic) random
// input, weight and bias tensors. The "expected" result is the reference backend's
// output rather than a hand-computed value.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

    unsigned int outputNum = inputNum;
    unsigned int outputChannels = 2;
    // Standard conv output size floor((in + 2*pad - kernel)/stride) + 1, folded into
    // a single integer division by adding the stride inside the numerator.
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

    // All tensors use NCHW ordering here.
    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
    unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
    unsigned int biasShape[] = {outputChannels};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
    kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
    // Weights and bias are fed to the workload as constant tensor inputs.
    kernelDesc.SetConstant(true);
    biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
    biasDesc.SetConstant(true);

    // Fixed seeds keep both runs fed with identical pseudo-random data.
    auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
    auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
    auto bias = MakeRandomTensor<T>(biasDesc, 1028);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Input slot order matters: 0 = input, 1 = weights, 2 = bias.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant tensors must be allocated and populated before workload creation.
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());
    AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());

    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandleRef = refTensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandleRef = refTensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // The reference workload reuses the same descriptor parameters but rebinds
    // every tensor slot to handles from the reference factory.
    armnn::Convolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadInput(refData, refInfo, 1, kernelDesc, weightsHandleRef.get());
    SetWorkloadInput(refData, refInfo, 2, biasDesc, biasHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
        = refWorkloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    weightsHandleRef->Allocate();
    biasHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());
    CopyDataToITensorHandle(weightsHandleRef.get(), kernel.data());
    CopyDataToITensorHandle(biasHandleRef.get(), bias.data());

    ExecuteWorkload(*workload, memoryManager);

    // The reference path drives the workload directly instead of via ExecuteWorkload.
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
1466
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001467LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
1468 armnn::IWorkloadFactory& workloadFactory,
1469 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001470 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001471 bool biasEnabled,
1472 const armnn::DataLayout& dataLayout)
1473{
1474 // BFloat16 input and weight, Float32 output
1475 armnn::IgnoreUnused(biasEnabled);
1476
1477 // Input is a single-batch, 1 channel, 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +01001478 armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16);
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001479
1480 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1481 {
1482 10.0367984f, // 10.0625
1483 2.0380895f, // 2.03125
1484 15.0420157f, // 15.0625
1485 22.0675631f, // 22.125
1486 8.0938920f, // 8.125
1487 5.0476106f, // 5.0625
1488 80.1035490f, // 80
1489 100.1260370f, // 100
1490 55.0461647f, // 55
1491 120.0883828f, // 120
1492 9.1159540f, // 9.125
1493 90.0498519f, // 90
1494 200.0104630f, // 200
1495 30.0154114f, // 30
1496 75.00137681f, // 75
1497 30.0344238f, // 30
1498 25.0356445f, // 25
1499 130.0495605f, // 130
1500 60.0683594f, // 60
1501 35.0991211f, // 35
1502 8.0461426f, // 8.0625
1503 12.0996094f, // 12.125
1504 98.1269530f, // 98
1505 125.0393066f, // 125
1506 5.103516f // 5.0937
1507 },
1508 1.0f, 0);
1509
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001510 // Use a 3x3 kernel.
1511 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1512
1513 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1514 {
1515 -0.126184f, // -0.125977
1516 -0.150468f, // -0.150391
1517 -0.101412f, // -0.101562
1518 -0.0586369f,// -0.0585938
1519 -0.0865864f,// -0.0864258
1520 -0.0435089f,// -0.043457
1521 0.0347555f, // 0.034668
1522 0.0323111f, // 0.0322266
1523 0.0385381f // 0.0385742
1524 },
1525 1.0f, 0);
1526
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001527 // Expected output is a single-batch, 1 channel, 3x3 image.
Sadik Armagan483c8112021-06-01 09:24:52 +01001528 armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32);
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001529
1530 // Expected output (with results if calculated as FP32 in the comments)
1531 const std::vector<float> outputData =
1532 {
1533 2.296875f, // 2.29240716
1534 5.75f, // 5.75851926
1535 3.78125f, // 3.79855026
1536 -11.625f, // -11.65498118
1537 -47.25f, // -47.27316893
1538 -30.0f, // -30.04771684
1539 -8.25f, // -8.28126168
1540 -43.5f, // -43.46531337
1541 -20.625f // -20.63477281
1542 };
1543
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001544 uint32_t padLeft = 1;
1545 uint32_t padTop = 1;
1546 uint32_t padRight = 1;
1547 uint32_t padBottom = 1;
1548 uint32_t strideX = 2;
1549 uint32_t strideY = 2;
1550
1551 return SimpleConvolution2dNhwcTestImpl
1552 <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
1553 workloadFactory,
1554 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001555 tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001556 inputValues,
1557 kernelValues,
1558 std::vector<float>(),
1559 outputData,
1560 inputDesc.GetShape(),
1561 kernelDesc.GetShape(),
1562 outputDesc.GetShape(),
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001563 dataLayout,
1564 1.0f,
1565 0,
1566 padLeft,
1567 padTop,
1568 padRight,
1569 padBottom,
1570 strideX,
1571 strideY);
1572}
1573
1574LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
1575 armnn::IWorkloadFactory& workloadFactory,
1576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001577 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001578 bool biasEnabled,
1579 const armnn::DataLayout& dataLayout)
1580{
1581 // BFloat16 input and weight, Float32 output
1582 armnn::IgnoreUnused(biasEnabled);
1583
1584 // Input is a single-batch, 1 channel, 5x5 image.
1585 armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
1586
1587 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1588 {
1589 0.0367984f, // 0.0368652
1590 0.0380895f, // 0.0380859
1591 0.0420157f, // 0.0419922
1592 0.0675631f, // 0.0673828
1593 0.0938920f, // 0.09375
1594 0.0476106f, // 0.0476074
1595 0.1035490f, // 0.103516
1596 0.1260370f, // 0.125977
1597 0.0461647f, // 0.0461426
1598 0.0883828f, // 0.0883789
1599 0.1159540f, // 0.115723
1600 0.0498519f, // 0.0498047
1601 0.0104630f, // 0.010437
1602 0.0154114f, // 0.0154419
1603 0.00137681f, // 0.00137329
1604 0.0344238f, // 0.0344616
1605 0.0356445f, // 0.0355693
1606 0.0495605f, // 0.0495018
1607 0.0683594f, // 0.0683308
1608 0.0991211f, // 0.0988837
1609 0.0461426f, // 0.0461838
1610 0.0996094f, // 0.0997546
1611 0.1269530f, // 0.127099
1612 0.0393066f, // 0.0392791
1613 0.103516f // 0.103641
1614 },
1615 1.0f, 0);
1616
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001617 // Use a 3x3 kernel.
1618 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1619
1620 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1621 {
1622 -0.126184f, // -0.125977
1623 -0.150468f, // -0.150391
1624 -0.101412f, // -0.101562
1625 -0.0586369f,// -0.0585938
1626 -0.0865864f,// -0.0864258
1627 -0.0435089f,// -0.043457
1628 0.0347555f, // 0.034668
1629 0.0323111f, // 0.0322266
1630 0.0385381f // 0.0385742
1631 },
1632 1.0f, 0);
1633
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001634 // Expected output is a single-batch, 1 channel, 3x3 image.
1635 armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
1636
1637 // Expected output (with results if calculated as FP32 in the comments)
1638 const std::vector<float> outputData =
1639 {
1640 0.000686645508f, // 0.000685
1641 0.000640869141f, // 0.000639
1642 -0.00759887695f, // -0.007631
1643 -0.02734375f, // -0.027388
1644 -0.0356445312f, // -0.035737
1645 -0.0145874023f, // -0.014568
1646 -0.0170898438f, // -0.017124
1647 -0.0373535156f, // -0.037431
1648 -0.0346679688f // -0.034808
1649 };
1650
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001651 uint32_t padLeft = 1;
1652 uint32_t padTop = 1;
1653 uint32_t padRight = 1;
1654 uint32_t padBottom = 1;
1655 uint32_t strideX = 2;
1656 uint32_t strideY = 2;
1657
1658 return SimpleConvolution2dNhwcTestImpl
1659 <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
1660 workloadFactory,
1661 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001662 tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001663 inputValues,
1664 kernelValues,
1665 std::vector<float>(),
1666 outputData,
1667 inputDesc.GetShape(),
1668 kernelDesc.GetShape(),
1669 outputDesc.GetShape(),
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001670 dataLayout,
1671 1.0f,
1672 0,
1673 padLeft,
1674 padTop,
1675 padRight,
1676 padBottom,
1677 strideX,
1678 strideY);
1679}
1680
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001681//
1682// DepthwiseConvolution2d implementations
1683//
1684
1685template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
1686 typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
1687LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
1688 armnn::IWorkloadFactory& workloadFactory,
1689 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001690 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001691 const std::vector<T>& input,
1692 const std::vector<T>& kernel,
1693 const std::vector<B>& bias,
1694 const std::vector<T>& outputExpected,
1695 const armnn::TensorShape& inputShape,
1696 const armnn::TensorShape& kernelShape,
1697 const armnn::TensorShape& outputExpectedShape,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001698 float qScale,
1699 int32_t qOffset,
1700 const armnn::DataLayout layout,
1701 uint32_t padLeft = 0,
1702 uint32_t padTop = 0,
1703 uint32_t padRight = 0,
1704 uint32_t padBottom = 0,
1705 uint32_t strideX = 1,
1706 uint32_t strideY = 1)
1707{
Sadik Armagan483c8112021-06-01 09:24:52 +01001708 unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
1709 unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[1]);
1710 unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
1711 unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
Jan Eilers53ef7952021-06-02 12:01:25 +01001712 unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
1713 unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
1714 unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
Sadik Armagan483c8112021-06-01 09:24:52 +01001715 unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
1716 unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
1717 unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
1718 unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001719
1720 // If a bias is used, its size must equal the number of output channels.
1721 bool biasEnabled = bias.size() > 0;
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01001722 ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001723
1724 // Creates the tensors.
1725 armnn::TensorInfo inputTensorInfo =
1726 armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1727 armnn::TensorInfo outputTensorInfo =
1728 armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Jan Eilers53ef7952021-06-02 12:01:25 +01001729 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
Teresa Charlinee1497c2023-03-30 13:56:34 +01001730 kernelDesc.SetConstant(true);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001731 armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
Teresa Charlinee1497c2023-03-30 13:56:34 +01001732 biasDesc.SetConstant(true);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001733
1734 // Set quantization parameters if the requested type is a quantized type.
1735 if (armnn::IsQuantizedType<T>())
1736 {
1737 inputTensorInfo.SetQuantizationScale(qScale);
1738 inputTensorInfo.SetQuantizationOffset(qOffset);
1739 outputTensorInfo.SetQuantizationScale(qScale);
1740 outputTensorInfo.SetQuantizationOffset(qOffset);
1741 kernelDesc.SetQuantizationScale(qScale);
1742 kernelDesc.SetQuantizationOffset(qOffset);
1743 biasDesc.SetQuantizationScale(qScale*qScale);
1744 biasDesc.SetQuantizationOffset(0);
1745 }
1746
1747 // Construct the input data.
1748 std::vector<T> inputData;
1749 inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
1750
1751 // At this point if we require it permute the input data
1752 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1753 if (layout == armnn::DataLayout::NHWC)
1754 {
1755 std::vector<T> tmp(inputData.size());
1756 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1757 inputData = tmp;
1758 }
1759
Cathal Corbett4b19d222022-05-11 20:12:17 +01001760 std::vector<T> kernelData;
1761 kernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
1762 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
1763 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
1764 {
1765 if (layout == armnn::DataLayout::NCHW)
1766 {
1767 std::vector<T> tmp(kernelData.size());
1768 kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
1769 armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
1770 kernelData = tmp;
1771 }
1772 }
1773
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001774 // Construct the output data, with bias applied, as appropriate.
1775 std::vector<T> outputData;
1776 outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
1777 if (biasEnabled)
1778 {
1779 std::vector<T> biasV;
1780 biasV.assign(bias.data(), bias.data() + outputChannels);
1781 ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1782 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1783 outputWidth, outputHeight);
1784 }
1785
Sadik Armagan483c8112021-06-01 09:24:52 +01001786 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001787
1788 // At this point if we require it permute the expected output
1789 if (layout == armnn::DataLayout::NHWC)
1790 {
1791 std::vector<T> tmp(outputData.size());
1792 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
1793 outputData = tmp;
1794 }
1795
Keith Davisf500d6c2020-08-31 08:32:55 +01001796 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01001797 std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
1798 std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
Keith Davisf500d6c2020-08-31 08:32:55 +01001799 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1800
Cathal Corbett06902652022-04-14 17:55:11 +01001801 armnn::DepthwiseConvolution2dQueueDescriptor data;
1802 armnn::WorkloadInfo info;
1803
Cathal Corbett4b19d222022-05-11 20:12:17 +01001804 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
Cathal Corbett06902652022-04-14 17:55:11 +01001805
1806 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1807 AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
1808 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001809
James Conroy1f58f032021-04-27 17:13:27 +01001810 armnn::ScopedTensorHandle biasTensor(biasDesc);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001811 if (biasEnabled)
1812 {
Sadik Armagan483c8112021-06-01 09:24:52 +01001813 AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
Cathal Corbett06902652022-04-14 17:55:11 +01001814
1815 biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
1816 AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
1817 AddInputToWorkload(data, info, biasDesc, biasHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001818 }
1819
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001820 data.m_Parameters.m_StrideX = strideX;
1821 data.m_Parameters.m_StrideY = strideY;
1822 data.m_Parameters.m_PadLeft = padLeft;
1823 data.m_Parameters.m_PadRight = padRight;
1824 data.m_Parameters.m_PadTop = padTop;
1825 data.m_Parameters.m_PadBottom = padBottom;
1826 data.m_Parameters.m_BiasEnabled = biasEnabled;
1827 data.m_Parameters.m_DataLayout = layout;
1828
Teresa Charlin611c7fb2022-01-07 09:47:29 +00001829 std::unique_ptr<armnn::IWorkload> workload
1830 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Cathal Corbett06902652022-04-14 17:55:11 +01001831
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001832 inputHandle->Allocate();
1833 outputHandle->Allocate();
1834
Sadik Armagan483c8112021-06-01 09:24:52 +01001835 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001836
1837 ExecuteWorkload(*workload, memoryManager);
1838
Sadik Armagan483c8112021-06-01 09:24:52 +01001839 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001840
Sadik Armagan483c8112021-06-01 09:24:52 +01001841 return LayerTestResult<T, 4>(actualOutput,
1842 outputData,
1843 outputHandle->GetShape(),
1844 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001845}
1846
1847template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1848LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
1849 armnn::IWorkloadFactory& workloadFactory,
1850 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001851 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001852 float qScale,
1853 int32_t qOffset,
1854 bool biasEnabled,
1855 const armnn::DataLayout layout)
1856{
1857 using B = armnn::ResolveType<ArmnnBType>;
1858
1859 unsigned int inputHeight = 3;
1860 unsigned int inputWidth = 3;
1861 unsigned int inputChannels = 2;
1862 unsigned int inputNum = 1;
1863
1864 unsigned int kernelHeight = 3;
1865 unsigned int kernelWidth = 3;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001866
1867 unsigned int outputHeight = 1;
1868 unsigned int outputWidth = 1;
Jan Eilers53ef7952021-06-02 12:01:25 +01001869 unsigned int outputChannels = inputChannels;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001870 unsigned int outputNum = inputNum;
1871
1872 armnn::TensorInfo inputTensorInfo =
1873 armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1874 armnn::TensorInfo outputTensorInfo =
1875 armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Teresa Charlinee1497c2023-03-30 13:56:34 +01001876 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
1877 kernelDesc.SetConstant(true);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001878 armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
Teresa Charlinee1497c2023-03-30 13:56:34 +01001879 biasDesc.SetConstant(true);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001880
1881 // Set quantization parameters if the requested type is a quantized type.
1882 if(armnn::IsQuantizedType<T>())
1883 {
1884 inputTensorInfo.SetQuantizationScale(qScale);
1885 inputTensorInfo.SetQuantizationOffset(qOffset);
1886 outputTensorInfo.SetQuantizationScale(qScale);
1887 outputTensorInfo.SetQuantizationOffset(qOffset);
1888 kernelDesc.SetQuantizationScale(qScale);
1889 kernelDesc.SetQuantizationOffset(qOffset);
1890 biasDesc.SetQuantizationScale(qScale*qScale);
1891 biasDesc.SetQuantizationOffset(0);
1892 }
1893 std::vector<T> inputData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001894 QuantizedVector<T>({
1895 1.f, 2.f, 1.f,
1896 2.f, 1.f, 2.f,
1897 1.f, 2.f, 1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001898
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001899 1.f, 2.f, 1.f,
1900 2.f, 1.f, 2.f,
1901 1.f, 2.f, 1.f,
1902 },
1903 inputTensorInfo.GetQuantizationScale(),
1904 inputTensorInfo.GetQuantizationOffset()));
1905
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001906 // at this point if we require it permute the input data
1907 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1908 if (layout == armnn::DataLayout::NHWC)
1909 {
1910 std::vector<T> tmp(inputData.size());
1911 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1912 inputData = tmp;
1913 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001914
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001915 std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1916 biasDesc.GetQuantizationScale(),
1917 biasDesc.GetQuantizationOffset()));
1918
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001919 std::vector<T> kernelData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001920 QuantizedVector<T>({
1921 1.f, 0.f, 1.f,
1922 0.f, 0.f, 0.f,
1923 -1.f, 0.f, -1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001924
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001925 1.f, 0.f, 1.f,
1926 0.f, 0.f, 0.f,
1927 -1.f, 0.f, -1.f,
1928 },
1929 kernelDesc.GetQuantizationScale(),
1930 kernelDesc.GetQuantizationOffset()));
1931
Cathal Corbett4b19d222022-05-11 20:12:17 +01001932 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
1933 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
1934 {
1935 if (layout == armnn::DataLayout::NCHW)
1936 {
1937 std::vector<T> tmp(kernelData.size());
1938 kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
1939 armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
1940 kernelData = tmp;
1941 }
1942 }
1943
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001944 // Manually calculated.
1945 std::vector<T> outputImage(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001946 QuantizedVector<T>({ 0.f, 0.f },
1947 outputTensorInfo.GetQuantizationScale(),
1948 outputTensorInfo.GetQuantizationOffset())
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001949 );
1950
1951 // Optionally apply bias to output image.
1952 if(biasEnabled)
1953 {
1954 ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1955 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1956 outputWidth, outputHeight);
1957 }
1958
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001959 if (layout == armnn::DataLayout::NHWC)
1960 {
1961 std::vector<T> tmp(outputImage.size());
1962 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
1963 outputImage = tmp;
1964 }
1965
Sadik Armagan483c8112021-06-01 09:24:52 +01001966 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01001967
1968 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01001969 std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
1970 std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
Keith Davisf500d6c2020-08-31 08:32:55 +01001971 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1972
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001973 armnn::DepthwiseConvolution2dQueueDescriptor data;
1974 armnn::WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001975
Cathal Corbett06902652022-04-14 17:55:11 +01001976 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001977
1978 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
Cathal Corbett06902652022-04-14 17:55:11 +01001979 AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001980 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1981
Cathal Corbett06902652022-04-14 17:55:11 +01001982 armnn::ScopedTensorHandle biasTensor(biasDesc);
1983 if (biasEnabled)
1984 {
1985 AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
1986
1987 biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
1988 AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasV.data());
1989 AddInputToWorkload(data, info, biasDesc, biasHandle.get());
1990 }
1991
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001992 data.m_Parameters.m_StrideX = 1;
1993 data.m_Parameters.m_StrideY = 1;
1994 data.m_Parameters.m_PadLeft = 0;
1995 data.m_Parameters.m_PadRight = 0;
1996 data.m_Parameters.m_PadTop = 0;
1997 data.m_Parameters.m_PadBottom = 0;
1998 data.m_Parameters.m_BiasEnabled = biasEnabled;
1999 data.m_Parameters.m_DataLayout = layout;
2000
Teresa Charlin611c7fb2022-01-07 09:47:29 +00002001 std::unique_ptr<armnn::IWorkload> workload
2002 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Cathal Corbett06902652022-04-14 17:55:11 +01002003
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002004 inputHandle->Allocate();
2005 outputHandle->Allocate();
2006
Sadik Armagan483c8112021-06-01 09:24:52 +01002007 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002008
2009 ExecuteWorkload(*workload, memoryManager);
2010
Sadik Armagan483c8112021-06-01 09:24:52 +01002011 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002012
Sadik Armagan483c8112021-06-01 09:24:52 +01002013 return LayerTestResult<T, 4>(actualOutput,
2014 outputImage,
2015 outputHandle->GetShape(),
2016 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002017}
2018
// Runs a fixed DepthwiseConvolution2d test case and compares the workload result
// against a hand-calculated expectation.
//   input   : [1, 2, 8, 16] (values authored in NCHW, permuted when layout == NHWC)
//   weights : [1, 5, 3, 4]  i.e. [1, H, W, I*M] with depth multiplier M = 2
//   strides : (X=2, Y=1); padding: left/right 0, top/bottom 1
//   bias    : { 0, 2, 1, -1 }, folded into the expected output only when biasEnabled
// qScale/qOffset are applied to input/output/weights when T is a quantized type.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using B = armnn::ResolveType<ArmnnBType>;

    unsigned int depthMultiplier = 2;

    unsigned int inputHeight    = 8;
    unsigned int inputWidth     = 16;
    unsigned int inputChannels  = 2;
    unsigned int inputBatchSize = 1;

    unsigned int kernelHeight = 5;
    unsigned int kernelWidth  = 3;

    // Output extent follows from the convolution geometry above:
    // height uses pad top+bottom of 1 each; width uses strideX == 2.
    unsigned int outputHeight    = inputHeight - kernelHeight + 1 + 2;
    unsigned int outputWidth     = (inputWidth - kernelWidth + 1)/2;
    unsigned int outputChannels  = inputChannels * depthMultiplier;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
            inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
            outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    // Depthwise weights are authored as [1, H, W, I*M].
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType);
    kernelDesc.SetConstant(true);
    armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
    biasDesc.SetConstant(true);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input-scale * weight-scale; offset is always 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // NOTE: originalInputData is in NCHW format
    std::vector<T> originalInputData = std::vector<T>(
        QuantizedVector<T>({
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
        },
        inputTensorInfo.GetQuantizationScale(),
        inputTensorInfo.GetQuantizationOffset()));

    std::vector<T> inputData = originalInputData;
    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            originalInputData.data(), inputData.data(), sizeof(T));
    }

    // One bias value per output channel.
    std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
                                              biasDesc.GetQuantizationScale(),
                                              biasDesc.GetQuantizationOffset());

    // Four 5x3 filters, one per output channel ( = inputChannels * depthMultiplier).
    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>({
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            0, 0, 0,
            0, -1, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            0, 0, 0,
            0, 0, 0,
            0, 1, 0,
            0, 0, 0,
            0, 0, 0
        },
        kernelDesc.GetQuantizationScale(),
        kernelDesc.GetQuantizationOffset()));

    // The Neon/CL backends consume NCHW weights when the data layout is NCHW,
    // so re-permute the authored [1,H,W,I*M] weights for those backends only.
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernelData.size());
            kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
            kernelData = tmp;
        }
    }

    // Manually calculated (NCHW ordering; one 6x7 plane per output channel).
    std::vector<T> originalOutputImage = std::vector<T>(
        QuantizedVector<T>({
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
            5, 5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5,
            5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5, 5, 5, 5, 5, 5, 5,
            2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5,
            4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 6, 6, 6, 6, 6, 6, 6,
            6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
            1, 3, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
            3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
            3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0
        },
        outputTensorInfo.GetQuantizationScale(),
        outputTensorInfo.GetQuantizationOffset()));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(originalOutputImage,
                  outputTensorInfo.GetQuantizationScale(),
                  outputTensorInfo.GetQuantizationOffset(),
                  biasV,
                  biasDesc.GetQuantizationScale(),
                  biasDesc.GetQuantizationOffset(),
                  outputWidth,
                  outputHeight);
    }

    // Permute the expected output to match the requested data layout.
    std::vector<T> outputImage = originalOutputImage;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
                            originalOutputImage.data(), outputImage.data(), sizeof(T));
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor

    // Workload input slot order matters: 0 = input, 1 = weights, 2 = bias (optional).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // NOTE(review): biasTensor is filled below but never attached to the workload
    // (the bias reaches it via biasHandle) — looks like a leftover from the older
    // ConstTensor-in-descriptor API; confirm before removing.
    armnn::ScopedTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());

        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasV.data());
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Parameters.m_StrideX = 2;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 1;
    data.m_Parameters.m_PadBottom = 1;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputImage,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());

}
2237
// Generic DepthwiseConvolution2d test driver: runs the workload over
// caller-supplied input/kernel/expected-output data and compares the result.
// The single input/expected batch is duplicated so the workload actually runs
// with batch size 2. An empty `bias` vector disables the bias; a non-empty one
// must hold exactly one value per output channel and is folded into the
// expected output here. Data is authored in NCHW and permuted when
// layout == NHWC; weights are authored as [1, H, W, I*M].
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,
    const std::vector<T>& originalKernel,
    const std::vector<B>& bias,
    const std::vector<T>& originalOutputExpected,
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    // Input/output shapes are interpreted as NCHW: [N, C, H, W].
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    // Kernel shape is interpreted as [1, H, W, I*M].
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);


    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
            armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
            armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);

    // Depthwise weights keep the [1, H, W, I*M] layout regardless of the
    // input/output data layout (see the permute for GpuAcc/CpuAcc below).
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
    kernelDesc.SetConstant(true);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
    biasDesc.SetConstant(true);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input-scale * weight-scale; offset is always 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // NOTE(review): the copy length uses outputChannels where the kernel's own
    // last dimension is kernelChannels; the two coincide for every current
    // caller (outputChannels == inputChannels * depthMultiplier == I*M) —
    // confirm before reusing with a kernel where they differ.
    std::vector<T> kernelData;
    kernelData.assign(originalKernel.data(), originalKernel.data() + kernelHeight*kernelWidth*outputChannels);
    // The Neon/CL backends consume NCHW weights when the data layout is NCHW,
    // so re-permute the authored [1,H,W,I*M] weights for those backends only.
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernelData.size());
            kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
            kernelData = tmp;
        }
    }

    // Construct input data: the single supplied batch, duplicated.
    std::vector<T> input;
    input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), input.begin(), input.end());
    inputData.insert(inputData.end(), input.begin(), input.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    std::vector<T> output;
    output.assign(originalOutputExpected.data(),
                  originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output data if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data: the single expected batch, duplicated.
    std::vector<T> outputData;
    outputData.insert(outputData.end(), output.begin(), output.end());
    outputData.insert(outputData.end(), output.begin(), output.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor

    // Workload input slot order matters: 0 = input, 1 = weights, 2 = bias (optional).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // NOTE(review): biasTensor is filled below but never attached to the workload
    // (the bias reaches it via biasHandle) — looks like a leftover from the older
    // ConstTensor-in-descriptor API; confirm before removing.
    armnn::ScopedTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
2423
2424template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2425 typename T = armnn::ResolveType<ArmnnType>>
2426LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
2427 armnn::IWorkloadFactory& workloadFactory,
2428 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002429 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002430 float qScale,
2431 int32_t qOffset,
2432 bool biasEnabled,
2433 const armnn::DataLayout layout)
2434{
2435 // Use a single-batch 2-channel 5x5 image as input.
2436 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002437 auto input = QuantizedVector<T>(
2438 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002439 0, 1, 2, 3, 4,
2440 5, 6, 7, 8, 9,
2441 10, 11, 12, 13, 14,
2442 15, 16, 17, 18, 19,
2443 20, 21, 22, 23, 24,
2444
2445 25, 26, 27, 28, 29,
2446 30, 31, 32, 33, 34,
2447 35, 36, 37, 38, 39,
2448 40, 41, 42, 43, 44,
2449 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002450 },
2451 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002452 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002453
2454 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Jan Eilers53ef7952021-06-02 12:01:25 +01002455 // Weights layout for depthwise: [1,H,W,I*M]
2456 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2457 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002458 32, 31, 30, 29,
2459 28, 27, 26, 25,
2460 24, 23, 22, 21,
2461 20, 19, 18, 17,
2462
2463 16, 15, 14, 13,
2464 12, 11, 10, 9,
2465 8, 7, 6, 5,
2466 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002467 },
2468 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002469 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002470
2471 // Expected output is 1 batch of a 2-channel 5x5 image.
2472 // Calculated using the python tensorflow library with strideX=1, strideY=1.
2473 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002474 auto expectedOutput = QuantizedVector<T>(
2475 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002476 396, 664, 820, 756, 602, 1016, 1608, 1880, 1652, 1268, 1976, 2968, 3240, 2732,
2477 2028, 2628, 3808, 4060, 3312, 2390, 2596, 3700, 3900, 3130, 2226, 2817, 4186,
2478 4330, 3609, 2651, 5414, 7864, 8120, 6626, 4780, 6314, 9144, 9400, 7646, 5500,
2479 6759, 9610, 9850, 7875, 5579, 5935, 8348, 8540, 6757, 4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002480 },
2481 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002482 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002483
2484 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2485 workloadFactory,
2486 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002487 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002488 input,
2489 kernel,
2490 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2491 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002492 inputTensorInfo.GetShape(),
2493 kernelTensorInfo.GetShape(),
2494 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002495 qScale,
2496 qOffset,
2497 layout,
2498 1, // Padding left.
2499 1, // Padding top.
2500 2, // Padding right.
2501 2, // Padding bottom.
2502 1, // strideX
2503 1); // strideY
2504}
2505
2506template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2507 typename T = armnn::ResolveType<ArmnnType>>
2508LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
2509 armnn::IWorkloadFactory& workloadFactory,
2510 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002511 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002512 float qScale,
2513 int32_t qOffset,
2514 bool biasEnabled)
2515{
2516 auto layout = armnn::DataLayout::NHWC;
2517
2518 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002519 auto input = QuantizedVector<T>(
2520 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002521 0, 1, 2, 3, 4,
2522 5, 6, 7, 8, 9,
2523 10, 11, 12, 13, 14,
2524 15, 16, 17, 18, 19,
2525 20, 21, 22, 23, 24,
2526
2527 25, 26, 27, 28, 29,
2528 30, 31, 32, 33, 34,
2529 35, 36, 37, 38, 39,
2530 40, 41, 42, 43, 44,
2531 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002532 },
2533 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002534 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002535
Jan Eilers53ef7952021-06-02 12:01:25 +01002536 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2537 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002538 32, 31, 30, 29,
2539 28, 27, 26, 25,
2540 24, 23, 22, 21,
2541 20, 19, 18, 17,
2542
2543 16, 15, 14, 13,
2544 12, 11, 10, 9,
2545 8, 7, 6, 5,
2546 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002547 },
2548 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002549 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002550
2551 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002552 auto expectedOutput = QuantizedVector<T>(
2553 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002554 396,664,820,756,602,
2555 1016,1608,1880,1652,1268,
2556 1976,2968,3240,2732,2028,
2557 2628,3808,4060,3312,2390,
2558 2596,3700,3900,3130,2226,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002559
Jan Eilers53ef7952021-06-02 12:01:25 +01002560 2817,4186,4330,3609,2651,
2561 5414,7864,8120,6626,4780,
2562 6314,9144,9400,7646,5500,
2563 6759,9610,9850,7875,5579,
2564 5935,8348,8540,6757,4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002565 },
2566 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002567 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002568
2569 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2570 workloadFactory,
2571 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002572 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002573 input,
2574 kernel,
2575 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2576 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002577 inputTensorInfo.GetShape(),
2578 kernelTensorInfo.GetShape(),
2579 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002580 qScale,
2581 qOffset,
2582 layout,
2583 1, // Padding left.
2584 1, // Padding top.
2585 2, // Padding right.
2586 2, // Padding bottom.
2587 1, // strideX
2588 1); // strideY
2589}
2590
2591template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2592 typename T = armnn::ResolveType<ArmnnType>>
2593LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
2594 armnn::IWorkloadFactory& workloadFactory,
2595 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002596 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002597 float qScale,
2598 int32_t qOffset,
2599 bool biasEnabled)
2600{
2601 auto layout = armnn::DataLayout::NHWC;
2602
Sadik Armagan483c8112021-06-01 09:24:52 +01002603 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
2604 auto input = QuantizedVector<T>(
2605 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002606 0, 0, 0, 0, 0, 0, 0, 0, 0,
2607 0, 0, 0, 0, 0, 0, 0, 0, 0,
2608 0, 0, 0, 0, 0, 0, 0, 0, 0,
2609 0, 0, 0, 1, 1, 1, 0, 0, 0,
2610 0, 0, 0, 1, 1, 1, 0, 0, 0,
2611 0, 0, 0, 1, 1, 1, 0, 0, 0,
2612 0, 0, 0, 0, 0, 0, 0, 0, 0,
2613 0, 0, 0, 0, 0, 0, 0, 0, 0,
2614 0, 0, 0, 0, 0, 0, 0, 0, 0
2615 },
2616 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002617 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002618
Jan Eilers53ef7952021-06-02 12:01:25 +01002619 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
2620 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002621 1, 2, 3,
2622 4, 5, 6,
2623 7, 8, 9
2624 },
2625 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002626 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002627
2628 uint32_t padLeft = 0;
2629 uint32_t padTop = 0;
2630 uint32_t padRight = 0;
2631 uint32_t padBottom = 0;
2632 uint32_t strideX = 1;
2633 uint32_t strideY = 1;
2634 uint32_t dilationX = 3;
2635 uint32_t dilationY = 3;
2636
2637 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Sadik Armagan483c8112021-06-01 09:24:52 +01002638 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
2639 auto expectedOutput = QuantizedVector<T>(
2640 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002641 5, 5, 5,
2642 5, 5, 5,
2643 5, 5, 5
2644 },
2645 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002646 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002647
2648 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2649 workloadFactory,
2650 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002651 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002652 input,
2653 kernel,
2654 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2655 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002656 inputTensorInfo.GetShape(),
2657 kernelTensorInfo.GetShape(),
2658 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002659 qScale,
2660 qOffset,
2661 layout,
2662 padLeft,
2663 padTop,
2664 padRight,
2665 padBottom,
2666 strideX,
2667 strideY,
2668 dilationX,
2669 dilationY);
2670}
2671
2672template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
2673LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
2674 armnn::IWorkloadFactory& workloadFactory,
2675 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002676 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002677 const std::vector<float>& inputNoQuantizedValues,
2678 armnn::TensorInfo& inputTensorInfo,
2679 const std::vector<float>& kernelNoQuantizedValues,
2680 armnn::TensorInfo& kernelTensorInfo,
2681 const std::vector<float>& outputExpectedNoQuantizedValues,
2682 armnn::TensorInfo& outputTensorInfo,
2683 uint32_t dilationX,
2684 uint32_t dilationY,
2685 armnn::DataLayout layout = armnn::DataLayout::NCHW,
2686 bool biasEnabled = false)
2687{
2688 float qScale;
2689 int32_t qOffset;
2690 switch (ArmnnType)
2691 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002692 case armnn::DataType::QAsymmS8:
Derek Lambertif90c56d2020-01-10 17:14:08 +00002693 case armnn::DataType::QAsymmU8:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002694 {
2695 qScale = 0.1f;
2696 qOffset = 128;
2697 break;
2698 }
Derek Lambertif90c56d2020-01-10 17:14:08 +00002699 case armnn::DataType::QSymmS16:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002700 {
2701 qScale = 0.1f;
2702 qOffset = 0;
2703 break;
2704 }
2705 case armnn::DataType::Float32:
2706 default:
2707 {
2708 qScale = 0.f;
2709 qOffset = 0;
2710 break;
2711 }
2712 }
2713
2714 inputTensorInfo.SetQuantizationScale(qScale);
2715 inputTensorInfo.SetQuantizationOffset(qOffset);
2716 kernelTensorInfo.SetQuantizationScale(qScale);
2717 kernelTensorInfo.SetQuantizationOffset(qOffset);
2718 outputTensorInfo.SetQuantizationScale(qScale);
2719 outputTensorInfo.SetQuantizationOffset(qOffset);
2720
Sadik Armagan483c8112021-06-01 09:24:52 +01002721 auto input = QuantizedVector<T>(inputNoQuantizedValues,
2722 inputTensorInfo.GetQuantizationScale(),
2723 inputTensorInfo.GetQuantizationOffset());
2724 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
2725 kernelTensorInfo.GetQuantizationScale(),
2726 kernelTensorInfo.GetQuantizationOffset());
2727 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
2728 outputTensorInfo.GetQuantizationScale(),
2729 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002730
2731 uint32_t padLeft = 0;
2732 uint32_t padTop = 0;
2733 uint32_t padRight = 0;
2734 uint32_t padBottom = 0;
2735 uint32_t strideX = 1;
2736 uint32_t strideY = 1;
2737
2738 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2739 workloadFactory,
2740 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002741 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002742 input,
2743 kernel,
2744 GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2745 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002746 inputTensorInfo.GetShape(),
2747 kernelTensorInfo.GetShape(),
2748 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002749 qScale,
2750 qOffset,
2751 layout,
2752 padLeft,
2753 padTop,
2754 padRight,
2755 padBottom,
2756 strideX,
2757 strideY,
2758 dilationX,
2759 dilationY);
2760}
2761
2762template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2763LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
2764 armnn::IWorkloadFactory& workloadFactory,
2765 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002766 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002767 bool biasEnabled,
2768 const armnn::DataLayout layout)
2769{
2770 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
2771 std::vector<float> inputNoQuantizedValues =
2772 {
2773 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2774 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2775 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2776 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2777 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2778 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2780 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2781 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2782 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2783 };
2784
Jan Eilers53ef7952021-06-02 12:01:25 +01002785 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002786 std::vector<float> kernelNoQuantizedValues =
2787 {
2788 1, 2, 3,
2789 4, 5, 6,
2790 7, 8, 9
2791 };
2792
2793 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2794 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2795 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
2796 std::vector<float> outputExpectedNoQuantizedValues =
2797 {
2798 6., 5., 5., 5.,
2799 6., 5., 5., 5.,
2800 6., 5., 5., 5.,
2801 3., 2., 2., 2.
2802 };
2803
2804 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2805 workloadFactory,
2806 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002807 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002808 inputNoQuantizedValues,
2809 inputTensorInfo,
2810 kernelNoQuantizedValues,
2811 kernelTensorInfo,
2812 outputExpectedNoQuantizedValues,
2813 outputTensorInfo,
2814 3,
2815 3,
2816 layout,
2817 biasEnabled);
2818}
2819
2820template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2821LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
2822 armnn::IWorkloadFactory& workloadFactory,
2823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002824 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002825 bool biasEnabled,
2826 const armnn::DataLayout layout)
2827{
2828 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
2829 std::vector<float> inputNoQuantizedValues =
2830 {
2831 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2832 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2833 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2834 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2835 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2836 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2837 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2838 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2839 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2840 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2841
2842 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2843 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2844 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2845 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2846 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2847 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2848 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2849 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2850 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2851 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2852 };
2853
Jan Eilers53ef7952021-06-02 12:01:25 +01002854 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 2}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002855 std::vector<float> kernelNoQuantizedValues =
2856 {
2857 1, 2, 3,
2858 4, 5, 6,
2859 7, 8, 9,
2860
2861 1, 2, 3,
2862 4, 5, 6,
2863 7, 8, 9
2864 };
2865
2866 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2867 // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2868 armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
2869 std::vector<float> outputExpectedNoQuantizedValues =
2870 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002871 2, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 5, 3, 3, 3, 3,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002872
Jan Eilers53ef7952021-06-02 12:01:25 +01002873 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 6, 4, 4, 4
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002874 };
2875
2876 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2877 workloadFactory,
2878 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002879 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002880 inputNoQuantizedValues,
2881 inputTensorInfo,
2882 kernelNoQuantizedValues,
2883 kernelTensorInfo,
2884 outputExpectedNoQuantizedValues,
2885 outputTensorInfo,
2886 3,
2887 3,
2888 layout,
2889 biasEnabled);
2890}
2891
2892template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2893LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
2894 armnn::IWorkloadFactory& workloadFactory,
2895 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002896 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002897 bool biasEnabled,
2898 const armnn::DataLayout layout)
2899{
2900 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2901 std::vector<float> inputNoQuantizedValues =
2902 {
2903 10.0, 10.0, 10.0,
2904 10.0, 10.0, 10.0,
2905 10.0, 10.0, 10.0,
2906
2907 21.0, 22.0, 23.0,
2908 24.0, 25.0, 26.0,
2909 27.0, 28.0, 29.0
2910 };
2911
Jan Eilers53ef7952021-06-02 12:01:25 +01002912 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 8}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002913
2914 std::vector<float> kernelNoQuantizedValues =
2915 {
2916 0.25f, 0.25f,
2917 0.25f, 0.25f,
2918
2919 0.25f, 0.25f,
2920 0.25f, 0.25f,
2921
2922 0.0f , 0.0f,
2923 0.0f , 0.1f,
2924
2925 0.0f , 0.0f,
2926 0.0f , 0.1f,
2927
2928 0.2f , 0.0f,
2929 0.0f , 0.0f,
2930
2931 0.2f , 0.0f,
2932 0.0f , 0.0f,
2933
2934 0.0f , 0.3f,
2935 0.0f , 0.0f,
2936
2937 0.0f , 0.3f,
2938 0.0f , 0.0f
2939 };
2940
2941 armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType);
2942 std::vector<float> outputExpectedNoQuantizedValues =
2943 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002944 4.5f, 4.5f, 4.5f, 4.5f, 5.5f, 5.5f, 5.5f, 5.5f,
2945 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f,
2946 10.05f, 10.5f, 11.4f, 11.85f, 12.75f, 13.3f, 14.4f, 14.95f,
2947 5.25f, 5.5f, 6.0f, 6.25f, 7.45f, 7.8f, 8.5f, 8.85f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002948 };
2949
2950
2951 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2952 workloadFactory,
2953 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002954 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002955 inputNoQuantizedValues,
2956 inputTensorInfo,
2957 kernelNoQuantizedValues,
2958 kernelTensorInfo,
2959 outputExpectedNoQuantizedValues,
2960 outputTensorInfo,
2961 1,
2962 1,
2963 layout,
2964 biasEnabled);
2965}
2966
2967template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2968LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
2969 armnn::IWorkloadFactory& workloadFactory,
2970 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002971 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002972 bool biasEnabled,
2973 const armnn::DataLayout layout)
2974{
2975 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2976 std::vector<float> inputNoQuantizedValues =
2977 {
2978 10.0, 10.0, 10.0,
2979 10.0, 10.0, 10.0,
2980 10.0, 10.0, 10.0,
2981
2982 21.0, 22.0, 23.0,
2983 24.0, 25.0, 26.0,
2984 27.0, 28.0, 29.0
2985 };
2986
Jan Eilers53ef7952021-06-02 12:01:25 +01002987 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 4}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002988
2989 std::vector<float> kernelNoQuantizedValues =
2990 {
2991 0.25f, 0.25f,
2992 0.25f, 0.25f,
2993
2994 0.2f , 0.0f,
2995 0.0f , 0.0f,
2996
2997 0.0f , 0.0f,
2998 0.0f , 0.1f,
2999
3000 0.0f , 0.3f,
3001 0.0f , 0.0f
3002
3003 };
3004
3005 armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType);
3006 std::vector<float> outputExpectedNoQuantizedValues =
3007 {
Jan Eilers53ef7952021-06-02 12:01:25 +01003008 4.5f, 4.5f, 4.5f, 4.5f,
3009 5.5f, 5.5f, 5.5f, 5.5f,
3010 5.25f, 5.5f, 6.0f, 6.25f,
3011 7.65f, 8.0f, 8.7f, 9.05f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003012 };
3013
3014
3015 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
3016 workloadFactory,
3017 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003018 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003019 inputNoQuantizedValues,
3020 inputTensorInfo,
3021 kernelNoQuantizedValues,
3022 kernelTensorInfo,
3023 outputExpectedNoQuantizedValues,
3024 outputTensorInfo,
3025 1,
3026 1,
3027 layout,
3028 biasEnabled);
3029}
3030
3031template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
3032LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
3033 armnn::IWorkloadFactory& workloadFactory,
3034 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3035 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003036 const armnn::ITensorHandleFactory& tensorHandleFactory,
3037 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003038 const armnnUtils::DataLayoutIndexed& layout)
3039{
3040 unsigned int inputHeight = 8;
3041 unsigned int inputWidth = 16;
3042 unsigned int inputChannels = 3;
3043 unsigned int inputNum = 5;
3044
3045 unsigned int kernelHeight = 3;
3046 unsigned int kernelWidth = 3;
3047 unsigned int channelMultiplier = 1;
3048
3049 unsigned int strideX = 2;
3050 unsigned int strideY = 3;
3051 unsigned int padX = 1;
3052 unsigned int padY = 1;
3053
3054 unsigned int outputNum = inputNum;
3055 unsigned int outputChannels = inputChannels * channelMultiplier;
3056 unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
3057 unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
3058
3059 armnn::TensorInfo inputTensorInfo;
3060 armnn::TensorInfo outputTensorInfo;
3061 armnn::TensorInfo kernelDesc;
3062 armnn::TensorInfo biasDesc;
3063
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003064 std::vector<unsigned int> inputShape;
3065 std::vector<unsigned int> outputShape;
Jan Eilers53ef7952021-06-02 12:01:25 +01003066 std::vector<unsigned int> kernelShape{ 1, kernelHeight, kernelWidth, outputChannels };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003067 std::vector<unsigned int> biasShape{ outputChannels };
3068 switch (layout.GetDataLayout())
3069 {
3070 case armnn::DataLayout::NCHW:
3071 inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
3072 outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
3073 break;
3074 case armnn::DataLayout ::NHWC:
3075 inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
3076 outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
3077 break;
3078 default:
3079 throw armnn::InvalidArgumentException("unknown data layout ["
3080 + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
3081 }
3082
3083 float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
3084 float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
3085 int32_t qOffset = 0;
3086
3087 inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
3088 outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
Teresa Charlinee1497c2023-03-30 13:56:34 +01003089 kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset, true);
3090 biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset, true);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003091
Sadik Armagan483c8112021-06-01 09:24:52 +01003092 auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
3093 auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
3094 auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003095
Cathal Corbett4b19d222022-05-11 20:12:17 +01003096 armnn::TensorInfo aclKernelDescriptor = kernelDesc;
3097 std::vector<T> aclKernelData;
3098 aclKernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
3099 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
3100 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
3101 {
3102 if (layout == armnn::DataLayout::NCHW)
3103 {
3104 std::vector<T> tmp(kernel.size());
3105 aclKernelDescriptor.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
3106 armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernel.data(), tmp.data(), sizeof(T));
3107 aclKernelData = tmp;
3108 }
3109 }
3110
Sadik Armagan483c8112021-06-01 09:24:52 +01003111 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
3112 std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01003113
3114 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
Cathal Corbett4b19d222022-05-11 20:12:17 +01003115 std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(aclKernelDescriptor);
Cathal Corbett06902652022-04-14 17:55:11 +01003116 std::unique_ptr<armnn::ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
Keith Davisf500d6c2020-08-31 08:32:55 +01003117 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
3118
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003119 armnn::DepthwiseConvolution2dQueueDescriptor data;
3120 armnn::WorkloadInfo info;
Cathal Corbett06902652022-04-14 17:55:11 +01003121
Cathal Corbett06902652022-04-14 17:55:11 +01003122 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
Cathal Corbett4b19d222022-05-11 20:12:17 +01003123 AddInputToWorkload(data, info, aclKernelDescriptor, weightsHandle.get());
Cathal Corbett06902652022-04-14 17:55:11 +01003124 AddInputToWorkload(data, info, biasDesc, biasHandle.get());
3125 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
3126
Cathal Corbett4b19d222022-05-11 20:12:17 +01003127 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), aclKernelData.data());
Cathal Corbett06902652022-04-14 17:55:11 +01003128 AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003129
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003130 data.m_Parameters.m_StrideX = strideX;
3131 data.m_Parameters.m_StrideY = strideY;
3132 data.m_Parameters.m_PadLeft = padX;
3133 data.m_Parameters.m_PadRight = padX;
3134 data.m_Parameters.m_PadTop = padY;
3135 data.m_Parameters.m_PadBottom = padY;
3136 data.m_Parameters.m_BiasEnabled = true;
3137 data.m_Parameters.m_DataLayout = layout.GetDataLayout();
Keith Davisf500d6c2020-08-31 08:32:55 +01003138
3139 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01003140 std::unique_ptr<armnn::ITensorHandle> weightsHandleRef = refTensorHandleFactory.CreateTensorHandle(kernelDesc);
3141 std::unique_ptr<armnn::ITensorHandle> biasHandleRef = refTensorHandleFactory.CreateTensorHandle(biasDesc);
Keith Davisf500d6c2020-08-31 08:32:55 +01003142 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
3143
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003144 armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
3145 armnn::WorkloadInfo refInfo = info;
3146 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
Cathal Corbett06902652022-04-14 17:55:11 +01003147 SetWorkloadInput(refData, refInfo, 1, kernelDesc, weightsHandleRef.get());
3148 SetWorkloadInput(refData, refInfo, 2, biasDesc, biasHandleRef.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003149 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
3150
Teresa Charlin611c7fb2022-01-07 09:47:29 +00003151 std::unique_ptr<armnn::IWorkload> workload
3152 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
3153 std::unique_ptr<armnn::IWorkload> workloadRef
3154 = refWorkloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, refData, refInfo);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003155
3156 outputHandleRef->Allocate();
Cathal Corbett06902652022-04-14 17:55:11 +01003157 weightsHandleRef->Allocate();
3158 biasHandleRef->Allocate();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003159 inputHandleRef->Allocate();
3160
3161 inputHandle->Allocate();
3162 outputHandle->Allocate();
3163
Sadik Armagan483c8112021-06-01 09:24:52 +01003164 CopyDataToITensorHandle(inputHandle.get(), input.data());
3165 CopyDataToITensorHandle(inputHandleRef.get(), input.data());
Cathal Corbett06902652022-04-14 17:55:11 +01003166 CopyDataToITensorHandle(weightsHandleRef.get(), kernel.data());
3167 CopyDataToITensorHandle(biasHandleRef.get(), bias.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003168
3169 ExecuteWorkload(*workload, memoryManager);
3170
3171 workloadRef->PostAllocationConfigure();
3172 workloadRef->Execute();
3173
Sadik Armagan483c8112021-06-01 09:24:52 +01003174 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
3175 CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003176
Sadik Armagan483c8112021-06-01 09:24:52 +01003177 return LayerTestResult<T, 4>(actualOutput,
3178 expectedOutput,
3179 outputHandle->GetShape(),
3180 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003181}
3182
3183//
3184// Explicit template specializations
3185//
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003186template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3187Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3188 armnn::IWorkloadFactory&,
3189 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003190 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003191 bool,
3192 armnn::DataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003193
3194template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3195Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3196 armnn::IWorkloadFactory&,
3197 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003198 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003199 bool,
3200 armnn::DataLayout);
3201
Sadik Armagan303980c2020-04-17 12:45:14 +01003202template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3203Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3204 armnn::IWorkloadFactory&,
3205 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003206 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003207 bool,
3208 armnn::DataLayout);
3209
Derek Lambertif90c56d2020-01-10 17:14:08 +00003210template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3211Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003212 armnn::IWorkloadFactory&,
3213 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003214 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003215 bool,
3216 armnn::DataLayout);
3217
Derek Lambertif90c56d2020-01-10 17:14:08 +00003218template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3219Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003220 armnn::IWorkloadFactory&,
3221 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003222 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003223 bool,
3224 armnn::DataLayout);
3225
3226template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3227Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3228 armnn::IWorkloadFactory&,
3229 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003230 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003231 bool,
3232 armnn::DataLayout);
3233
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003234template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3235Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3236 armnn::IWorkloadFactory&,
3237 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003238 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003239 bool,
3240 armnn::DataLayout);
3241
Sadik Armagan303980c2020-04-17 12:45:14 +01003242template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3243Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3244 armnn::IWorkloadFactory&,
3245 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003246 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003247 bool,
3248 armnn::DataLayout);
3249
Derek Lambertif90c56d2020-01-10 17:14:08 +00003250template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3251Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003252 armnn::IWorkloadFactory&,
3253 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003254 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003255 bool,
3256 armnn::DataLayout);
3257
Derek Lambertif90c56d2020-01-10 17:14:08 +00003258template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3259Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003260 armnn::IWorkloadFactory&,
3261 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003262 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003263 bool,
3264 armnn::DataLayout);
3265
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003266template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3267Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3268 armnn::IWorkloadFactory &workloadFactory,
3269 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003270 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003271 bool biasEnabled,
3272 const armnn::DataLayout layout);
3273
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003274template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3275Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3276 armnn::IWorkloadFactory &workloadFactory,
3277 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003278 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003279 bool biasEnabled,
3280 const armnn::DataLayout layout);
3281
Sadik Armagan303980c2020-04-17 12:45:14 +01003282template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3283Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3284 armnn::IWorkloadFactory &workloadFactory,
3285 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003286 const armnn::ITensorHandleFactory& tensorHandleFactory,
Sadik Armagan303980c2020-04-17 12:45:14 +01003287 bool biasEnabled,
3288 const armnn::DataLayout layout);
3289
Derek Lambertif90c56d2020-01-10 17:14:08 +00003290template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3291Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003292 armnn::IWorkloadFactory &workloadFactory,
3293 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003294 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003295 bool biasEnabled,
3296 const armnn::DataLayout layout);
3297
Derek Lambertif90c56d2020-01-10 17:14:08 +00003298template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3299Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003300 armnn::IWorkloadFactory &workloadFactory,
3301 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003302 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003303 bool biasEnabled,
3304 const armnn::DataLayout layout);
3305
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003306template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3307DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3308 armnn::IWorkloadFactory&,
3309 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003310 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003311 bool,
3312 armnn::DataLayout);
3313
//
// Explicit template instantiations
//
// Each instantiation below forces the compiler to emit the named depthwise
// test for a concrete (input type, bias type) pair so the backend unit-test
// executables can link against it. Quantised variants pair the quantised
// input type with a Signed32 bias.
//

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);
3417
3418//
3419// Implementation functions
3420//
3421
3422LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
3423 armnn::IWorkloadFactory& workloadFactory,
3424 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003425 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003426 bool biasEnabled,
3427 const armnn::DataLayout layout)
3428{
3429 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003430 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003431}
3432
3433LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
3434 armnn::IWorkloadFactory& workloadFactory,
3435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003436 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003437 bool biasEnabled,
3438 const armnn::DataLayout layout)
3439{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003440 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003441 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003442}
3443
3444LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
3445 armnn::IWorkloadFactory& workloadFactory,
3446 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003447 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003448 bool biasEnabled,
3449 const armnn::DataLayout layout)
3450{
3451 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003452 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003453}
3454
3455LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
3456 armnn::IWorkloadFactory& workloadFactory,
3457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003458 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003459 bool biasEnabled)
3460{
3461 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3462 workloadFactory,
3463 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003464 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003465 0.f,
3466 0,
3467 biasEnabled,
3468 armnn::DataLayout::NHWC);
3469}
3470
3471LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
3472 armnn::IWorkloadFactory& workloadFactory,
3473 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003474 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003475 bool biasEnabled,
3476 const armnn::DataLayout layout)
3477{
3478 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3479 workloadFactory,
3480 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003481 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003482 0.f,
3483 0,
3484 biasEnabled,
3485 layout);
3486}
3487
3488LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
3489 armnn::IWorkloadFactory& workloadFactory,
3490 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003491 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003492 bool biasEnabled,
3493 const armnn::DataLayout layout)
3494{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003495 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003496 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003497}
3498
3499LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
3500 armnn::IWorkloadFactory& workloadFactory,
3501 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003502 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003503 bool biasEnabled,
3504 const armnn::DataLayout layout)
3505{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003506 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003507 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003508}
3509
3510LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
3511 armnn::IWorkloadFactory& workloadFactory,
3512 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003513 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003514 bool biasEnabled,
3515 const armnn::DataLayout layout)
3516{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003517 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003518 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003519}
3520
3521LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
3522 armnn::IWorkloadFactory& workloadFactory,
3523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003524 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003525 armnn::DataLayout layout)
3526{
3527 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003528 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003529}
3530
3531LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
3532 armnn::IWorkloadFactory& workloadFactory,
3533 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003534 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003535 armnn::DataLayout layout)
3536{
3537 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
3538 <armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003539 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003540}
3541
3542LayerTestResult<float, 4> Convolution1dTest(
3543 armnn::IWorkloadFactory& workloadFactory,
3544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003545 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003546 bool biasEnabled)
3547{
3548 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003549 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003550}
3551
3552LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
3553 armnn::IWorkloadFactory& workloadFactory,
3554 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003555 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003556 bool biasEnabled)
3557{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003558 return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003559 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003560}
3561
// Convolution with per-axis (per-output-channel) quantised weights:
// QAsymmU8 input/output, QSymmS8 kernel with one scale per output channel,
// Signed32 bias. Builds and runs the workload by hand so the weights and
// bias can be supplied as workload inputs.
LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    // Tensor shapes are authored in NHWC and permuted below if NCHW is requested.
    TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
    TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);

    // One kernel scale per output channel, quantised along axis 0.
    const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension, true);

    // Bias scales are inputScale * kernelScale per channel:
    // 0.5*0.5, 0.5*0.75, 0.5*1.0.
    const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension, true);

    std::vector<uint8_t> inputData =
    {
        138, 108, 138, 108, 138, 108
    };

    std::vector<int8_t> kernelData =
    {
        1, 2, 1, 2, 1, 2
    };

    std::vector<int32_t> biasData =
    {
        4, 4, 4
    };

    std::vector<uint8_t> expectedOutputData =
    {
        121, 118, 115, 121, 118, 115, 121, 118, 115
    };

    // Data above is laid out NHWC; convert tensors in place for NCHW runs.
    if (layout == DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputInfo, inputData);
        PermuteTensorNhwcToNchw(kernelInfo, kernelData);
        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
    }

    std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());

    // 1x1 stride, no padding; bias enabled so a third workload input is added.
    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    WorkloadInfo workloadInfo;

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;

    // Weights (and optionally bias) are passed as additional workload inputs.
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());

    if (descriptor.m_BiasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
        AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload= workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                        queueDescriptor,
                                                                        workloadInfo);
    // Allocate backing memory before copying any data in.
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (descriptor.m_BiasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), biasData.data());
    }
    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernelData.data());


    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // LayerTestResult compares actual vs expected data and shapes.
    return LayerTestResult<uint8_t, 4>(actualOutput,
                                       expectedOutputData,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}
3670
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003671LayerTestResult<float,4> CompareConvolution2dTest(
3672 armnn::IWorkloadFactory& workloadFactory,
3673 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003674 armnn::IWorkloadFactory& refWorkloadFactory,
3675 const armnn::ITensorHandleFactory& tensorHandleFactory,
3676 const armnn::ITensorHandleFactory& refTensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003677{
3678 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003679 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003680}
3681
3682LayerTestResult<float, 4> DepthwiseConvolution2dTest(
3683 armnn::IWorkloadFactory& workloadFactory,
3684 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003685 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003686 bool biasEnabled,
3687 const armnn::DataLayout layout)
3688{
3689 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003690 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003691}
3692
3693LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
3694 armnn::IWorkloadFactory& workloadFactory,
3695 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003696 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003697 bool biasEnabled)
3698{
3699 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003700 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003701}
3702
3703LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
3704 armnn::IWorkloadFactory& workloadFactory,
3705 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003706 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003707 bool biasEnabled,
3708 const armnn::DataLayout layout)
3709{
3710 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003711 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003712}
3713
// Depthwise convolution with a depth multiplier of 64: a single-channel 2x2
// input is convolved with 64 copies of the same 2x2 kernel, producing 64
// output channels.
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // 1x1x2x2 (NCHW) single-channel input.
    armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };

    // Repeat the same 2x2 kernel { 1, -1, -1, 1 } 64 times, one copy per
    // output channel.
    std::vector<float> kernelData;
    std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
    for (unsigned int i = 0; i < 64; ++i)
    {
        kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
    }
    armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);

    // permute from [O,1,H,W] --> [1,H,W,O]
    armnn::PermutationVector permutationVector {3,0,1,2};
    kernelTensorInfo = armnnUtils::Permuted(kernelTensorInfo, permutationVector);
    std::vector<float> kernelPermuted(kernelTensorInfo.GetNumElements());
    armnnUtils::Permute(kernelTensorInfo.GetShape(), permutationVector,
                        kernelData.data(), kernelPermuted.data(),
                        GetDataTypeSize(kernelTensorInfo.GetDataType()));

    // With input {1,2,3,4} and kernel {1,-1,-1,1}, every output element is
    // 1 - 2 - 3 + 4 = 0, for all 64 channels.
    std::vector<float> expectedOutputData(64, 0.f);
    armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);

    // No bias (empty vector); unquantised; fixed NCHW layout.
    return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernelPermuted,
        std::vector<float>(),
        expectedOutputData,
        inputTensorInfo.GetShape(),
        kernelTensorInfo.GetShape(),
        outputTensorInfo.GetShape(),
        0.f,
        0,
        armnn::DataLayout::NCHW);
}
3756
3757LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
3758 armnn::IWorkloadFactory& workloadFactory,
3759 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003760 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003761 bool biasEnabled,
3762 const armnn::DataLayout layout)
3763{
3764 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003765 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003766}
3767
3768LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
3769 armnn::IWorkloadFactory& workloadFactory,
3770 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003771 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003772 bool biasEnabled,
3773 const armnn::DataLayout layout)
3774{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003775 return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003776 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003777}
3778
3779LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
3780 armnn::IWorkloadFactory& workloadFactory,
3781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003782 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003783 bool biasEnabled,
3784 const armnn::DataLayout layout)
3785{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003786 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003787 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003788}
3789
3790LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
3791 armnn::IWorkloadFactory& workloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3793 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003794{
3795 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3796 workloadFactory,
3797 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003798 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003799 0.f,
3800 0,
3801 false);
3802}
3803
3804LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
3805 armnn::IWorkloadFactory& workloadFactory,
3806 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003807 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003808 bool biasEnabled,
3809 const armnn::DataLayout layout)
3810{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003811 return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003812 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003813}
3814
3815LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
3816 armnn::IWorkloadFactory& workloadFactory,
3817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003818 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003819 bool biasEnabled,
3820 const armnn::DataLayout layout)
3821{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003822 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003823 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003824}
3825
Teresa Charlind8df0262019-11-11 12:28:15 +00003826LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
3827 armnn::IWorkloadFactory& workloadFactory,
3828 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003829 const armnn::ITensorHandleFactory& tensorHandleFactory,
Teresa Charlind8df0262019-11-11 12:28:15 +00003830 const armnn::DataLayout layout)
3831{
3832 using namespace armnn;
3833
Derek Lambertif90c56d2020-01-10 17:14:08 +00003834 const DataType inputType = DataType::QAsymmU8;
Derek Lambertid466a542020-01-22 15:37:29 +00003835 const DataType kernelType = DataType::QSymmS8;
Teresa Charlind8df0262019-11-11 12:28:15 +00003836 const DataType biasType = DataType::Signed32;
3837
3838 TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128); // N H W C
3839 TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128); // N H W C
3840
3841 const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
Jan Eilers53ef7952021-06-02 12:01:25 +01003842 const unsigned int quantDimension = 3;
Teresa Charlinee1497c2023-03-30 13:56:34 +01003843 TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension, true); // [1, H, W, I*M]
Teresa Charlind8df0262019-11-11 12:28:15 +00003844
3845 const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3846 constexpr unsigned int biasQuantDimension = 0;
Teresa Charlinee1497c2023-03-30 13:56:34 +01003847 TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension, true);
Teresa Charlind8df0262019-11-11 12:28:15 +00003848
3849 std::vector<uint8_t> inputData =
3850 {
3851 129, 130,
3852 129, 130,
3853 129, 130,
3854 129, 130,
3855 129, 130,
3856 129, 130,
3857 129, 130,
3858 129, 130,
3859 129, 130
3860 };
3861
3862 std::vector<int8_t> kernelData =
3863 {
3864 1, 1, 1, 1,
3865 1, 1, 1, 1,
3866 1, 1, 1, 1,
3867 1, 1, 1, 1
3868 };
3869
Cathal Corbett4b19d222022-05-11 20:12:17 +01003870 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
3871 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
3872 {
3873 if (layout == armnn::DataLayout::NCHW)
3874 {
3875 std::vector<int8_t> tmp(kernelData.size());
3876 kernelInfo.SetShape(armnnUtils::Permuted(kernelInfo.GetShape(), {0, 2, 3, 1}));
3877 armnnUtils::Permute(kernelInfo.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(int8_t));
3878 kernelData = tmp;
3879 }
3880 }
3881
Teresa Charlind8df0262019-11-11 12:28:15 +00003882 std::vector<int32_t> biasData =
3883 {
3884 4, 4, 4, 4
3885 };
3886
3887 std::vector<uint8_t> expectedOutputData =
3888 {
3889 132, 130, 134, 131,
3890 132, 130, 134, 131,
3891 132, 130, 134, 131,
3892 132, 130, 134, 131
3893 };
3894
3895 if (layout == DataLayout::NCHW)
3896 {
3897 PermuteTensorNhwcToNchw(inputInfo, inputData);
3898 PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3899 }
3900
Sadik Armagan483c8112021-06-01 09:24:52 +01003901 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3902
Teresa Charlind8df0262019-11-11 12:28:15 +00003903 DepthwiseConvolution2dDescriptor descriptor;
3904 descriptor.m_StrideX = 1;
3905 descriptor.m_StrideY = 1;
3906 descriptor.m_PadLeft = 0;
3907 descriptor.m_PadRight = 0;
3908 descriptor.m_PadTop = 0;
3909 descriptor.m_PadBottom = 0;
3910 descriptor.m_DilationX = 1;
3911 descriptor.m_DilationY = 1;
3912 descriptor.m_BiasEnabled = true;
3913 descriptor.m_DataLayout = layout;
3914
Keith Davisf500d6c2020-08-31 08:32:55 +01003915 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01003916 std::unique_ptr<ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
3917 std::unique_ptr<ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
Keith Davisf500d6c2020-08-31 08:32:55 +01003918 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003919
Cathal Corbett06902652022-04-14 17:55:11 +01003920 DepthwiseConvolution2dQueueDescriptor queueDescriptor;
Teresa Charlind8df0262019-11-11 12:28:15 +00003921 WorkloadInfo workloadInfo;
Teresa Charlind8df0262019-11-11 12:28:15 +00003922
Cathal Corbett06902652022-04-14 17:55:11 +01003923 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3924 AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
3925 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3926 AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
3927
Cathal Corbett06902652022-04-14 17:55:11 +01003928 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data());
Cathal Corbett06902652022-04-14 17:55:11 +01003929 AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasData.data());
Teresa Charlind8df0262019-11-11 12:28:15 +00003930
Teresa Charlind8df0262019-11-11 12:28:15 +00003931 queueDescriptor.m_Parameters = descriptor;
Teresa Charlind8df0262019-11-11 12:28:15 +00003932
Teresa Charlin611c7fb2022-01-07 09:47:29 +00003933 std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
3934 queueDescriptor,
3935 workloadInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003936 inputHandle->Allocate();
3937 outputHandle->Allocate();
3938
3939 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3940
3941 ExecuteWorkload(*workload, memoryManager);
3942
3943 LayerTestResult<uint8_t, 4> ret(outputInfo);
3944
Sadik Armagan483c8112021-06-01 09:24:52 +01003945 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Teresa Charlind8df0262019-11-11 12:28:15 +00003946
Sadik Armagan483c8112021-06-01 09:24:52 +01003947 return LayerTestResult<uint8_t, 4>(actualOutput,
3948 expectedOutputData,
3949 outputHandle->GetShape(),
3950 outputInfo.GetShape());
Teresa Charlind8df0262019-11-11 12:28:15 +00003951}
3952
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003953LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
3954 armnn::IWorkloadFactory& workloadFactory,
3955 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3956 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003957 const armnn::ITensorHandleFactory& tensorHandleFactory,
3958 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003959 const armnn::DataLayout layout)
3960{
3961 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003962 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003963}
3964
3965LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
3966 armnn::IWorkloadFactory& workloadFactory,
3967 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3968 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003969 const armnn::ITensorHandleFactory& tensorHandleFactory,
3970 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003971 const armnn::DataLayout layout)
3972{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003973 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003974 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003975}