blob: 1e0adc169ac89170502f354436dbe837992c4631 [file] [log] [blame]
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001//
Teresa Charlinfbf0e5b2020-08-17 01:01:06 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003// SPDX-License-Identifier: MIT
4//
5
6#include "Conv2dTestImpl.hpp"
7
Colm Donelanc42a9872022-02-02 16:35:09 +00008#include <armnnUtils/QuantizeHelper.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +00009#include <armnnUtils/TensorUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010010
Jan Eilers8eb25602020-03-09 12:13:48 +000011#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan171214c2020-09-09 09:07:37 +010012#include <armnn/utility/NumericCast.hpp>
Matteo Martincighe011d202019-11-28 11:35:47 +000013#include <armnnUtils/DataLayoutIndexed.hpp>
14#include <armnnUtils/Permute.hpp>
15
Colm Donelan0c479742021-12-10 12:43:54 +000016#include <armnn/backends/TensorHandle.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010017
Sadik Armagana097d2a2021-11-24 15:47:28 +000018#include <armnnTestUtils/DataLayoutUtils.hpp>
19#include <armnnTestUtils/TensorCopyUtils.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000020#include <armnnTestUtils/WorkloadTestUtils.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010021
Colm Donelanc42a9872022-02-02 16:35:09 +000022#include <armnnTestUtils/TensorHelpers.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010023
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010024#include <string>
25
//
// Static data
//

// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});

// 4-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias4({1, 2, 3, 4});

// 8-channel bias used by a number of Conv2d tests (the 4-channel pattern repeated).
static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4});

// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
// Channel 0: rows of 0.5f except the second row, which is all zeros.
// Channel 1: a single vertical line of 1s in the third column, zeros elsewhere.
// Channel 2: all -1s.
static std::vector<float> ConvInput3x8x16({
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
});
64
Aron Virginas-Tar48623a02019-10-22 10:00:28 +010065using namespace armnnUtils;
66
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010067//
68// Helper templates
69//
70
71// Helper template that returns either Bias2 or an empty vector depending on whether bias is enabled.
72template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010073std::vector<T> GetBias2(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010074{
75 if(biasEnabled)
76 {
Sadik Armagan483c8112021-06-01 09:24:52 +010077 return QuantizedVector<T>(Bias2, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010078 }
79 else
80 {
Sadik Armagan483c8112021-06-01 09:24:52 +010081 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010082 }
83}
84
85// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
86template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +010087std::vector<T> GetBias4(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010088{
89 if(biasEnabled)
90 {
Sadik Armagan483c8112021-06-01 09:24:52 +010091 return QuantizedVector<T>(Bias4, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010092 }
93 else
94 {
Sadik Armagan483c8112021-06-01 09:24:52 +010095 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010096 }
97}
98
99// Helper template that returns either Bias8 or an empty vector depending on whether bias is enabled.
100template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100101std::vector<T> GetBias8(bool biasEnabled, float qScale)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100102{
103 if(biasEnabled)
104 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100105 return QuantizedVector<T>(Bias8, qScale, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100106 }
107 else
108 {
Sadik Armagan483c8112021-06-01 09:24:52 +0100109 return std::vector<T>();
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100110 }
111}
112
113// Helper template that returns either Bias4 or an empty vector depending on whether bias is enabled.
114template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Sadik Armagan483c8112021-06-01 09:24:52 +0100115std::vector<T> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100116{
117 const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
118 const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
119 const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex];
120
121 switch (outputChannels)
122 {
123 case 2:
124 default:
125 {
126 return GetBias2<ArmnnType>(biasEnabled, qScale);
127 }
128 case 4:
129 {
130 return GetBias4<ArmnnType>(biasEnabled, qScale);
131 }
132 case 8:
133 {
134 return GetBias8<ArmnnType>(biasEnabled, qScale);
135 }
136 }
137}
138
139//
140// Implementation templates
141//
142
// Mapping from input type to bias type for fully connected layers.
// float => float, uint8_t => int32_t
// The primary template is deliberately left undefined so that using an
// unsupported input type fails at compile time.
template<typename T>
struct FullyConnectedBiasTypeForInputType;

// Float inputs use float biases.
template<>
struct FullyConnectedBiasTypeForInputType<float>
{
    using Type = float;
};

// Quantized (uint8_t) inputs use int32_t biases.
template<>
struct FullyConnectedBiasTypeForInputType<uint8_t>
{
    using Type = int32_t;
};
159
160// Modifies a std::vector in-place using a specified bias.
161template<typename T, typename B>
162void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
163 const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
164{
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100165 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100166 "Invalid type and parameter combination.");
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100167 ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100168 "Invalid type and parameter combination.");
169
170 // Note we need to dequantize and re-quantize the image value and the bias.
171 for (uint32_t i = 0; i < bias.size(); ++i)
172 {
173 float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
174 for (uint32_t y = 0; y < h; ++y)
175 {
176 for (uint32_t x = 0; x < w; ++x)
177 {
178 uint32_t offset = (i * h + y) * w + x;
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100179 ARMNN_ASSERT(offset < v.size());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100180 T& outRef = v[offset];
181 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
182 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
183 }
184 }
185 }
186}
187
188//
189// Convolution2d implementations
190//
191
// Runs a Convolution2d workload end-to-end through the given workload factory
// and compares the device result against a caller-supplied expected output.
// The caller provides ONE batch of NCHW-shaped input/kernel/expected data; this
// function duplicates the input and expected output into TWO identical batches,
// optionally applies the bias to the expected output, and permutes all tensors
// to NHWC when that layout is requested.
// Returns a LayerTestResult holding both the actual and expected outputs.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,          // one batch, NCHW order
    const std::vector<T>& originalKernel,         // NCHW order
    const std::vector<B>& bias,                   // empty vector => bias disabled
    const std::vector<T>& originalOutputExpected, // one batch, NCHW order
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    armnn::IgnoreUnused(memoryManager);
    // All shapes below are interpreted as NCHW: [0]=batch, [1]=channels, [2]=height, [3]=width.
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
            armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
            armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    armnn::TensorInfo kernelDesc =
            armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is the product of the input and weight scales, zero offset.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct input data - two batches of the same input image.
    std::vector<T> inputImage;
    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    // Single batch of expected output data (pre-bias).
    std::vector<T> outputImage;
    outputImage.assign(originalOutputExpected.data(),
                       originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output image if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    // Data will be copied from outputHandle
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data - two identical images.
    std::vector<T> expectedOutput;
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
    expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(expectedOutput.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T));
        expectedOutput = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    // Permute the kernel if necessary
    std::vector<T> kernel = originalKernel;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
    }
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());

    if(biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
    }

    // Workload inputs: input tensor, weights and (optionally, below) bias.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), bias.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernel.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
380
// Runs a Convolution2d workload with caller-supplied NHWC-shaped data and
// compares the device result against the expected output as-is (no batch
// duplication and no permutation - unlike SimpleConvolution2dTestImpl).
// The output data type may differ from the input type via OutType/O.
// NOTE(review): qScale/qOffset are accepted but ignored (IgnoreUnused) - no
// quantization parameters are set on the tensor infos here.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>,
         armnn::DataType OutType = ArmnnType, typename O = armnn::ResolveType<OutType>>
LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,          // NHWC order
    const std::vector<T>& kernel,         // NHWC order
    const std::vector<B>& bias,           // empty vector => bias disabled
    const std::vector<O>& outputExpected, // NHWC order
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    const armnn::DataLayout dataLayout,
    float qScale,
    int32_t qOffset,
    uint32_t padLeft = 1,
    uint32_t padTop = 1,
    uint32_t padRight = 1,
    uint32_t padBottom = 1,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    armnn::IgnoreUnused(qScale, qOffset);
    // All shapes below are interpreted as NHWC: [0]=batch, [1]=height, [2]=width, [3]=channels.
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[2]);

    unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);

    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);

    bool biasEnabled = bias.size() > 0;

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
                                       OutType);
    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);

    // Construct the output data, with bias applied, as appropriate.
    std::vector<O> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);

    // Data will be copied from outputHandle after execution.
    std::vector<O> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());

    armnn::ScopedTensorHandle biasTensor(biasDesc);

    armnn::Convolution2dQueueDescriptor data;

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), bias.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernel.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<O, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
499
// Tests a 1D convolution by running a Convolution2d workload whose width
// dimension is fixed to 1 (padding/stride applied along the height axis).
// Input, kernel, bias and expected output data are hard-coded; the expected
// output has the bias folded in via ApplyBias when biasEnabled is set.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled)
{
    using B = armnn::ResolveType<ArmnnBType>;
    // Until we have a specialist 1D convolution layer, we can fake one using
    // 2D convolution with the final dimension set to 1.
    // I don't anticipate this being particularly slow, given that convolution is implemented
    // as a matrix multiplication, at which point dimension doesn't matter.

    unsigned int batchSize = 1;
    unsigned int inputChannels = 2;
    unsigned int outputChannels = 3;
    unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
    unsigned int kernelSize = 3;
    unsigned int padSize = 2;
    unsigned int stride = 1;
    unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.

    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
    armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputInfo.SetQuantizationScale(qScale);
        inputInfo.SetQuantizationOffset(qOffset);
        outputInfo.SetQuantizationScale(qScale);
        outputInfo.SetQuantizationOffset(qOffset);
        kernelInfo.SetQuantizationScale(qScale);
        kernelInfo.SetQuantizationOffset(qOffset);
        // Bias scale is the product of the input and weight scales, zero offset.
        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
        biasInfo.SetQuantizationOffset(0);
    }

    // Two input channels of five elements each.
    std::vector<T> inputData = QuantizedVector<T>(
        {
            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
        },
        inputInfo.GetQuantizationScale(),
        inputInfo.GetQuantizationOffset());

    // Three output channels, each with a kernel over the two input channels.
    std::vector<T> kernelData = QuantizedVector<T>(
        {
            1.0f, 0.0f, 0.0f,
            0.0f, 2.0f, -1.5f,

            0.0f, 0.0f, 0.0f,
            0.2f, 0.2f, 0.2f,

            0.5f, 0.0f, 0.5f,
            0.0f, -1.0f, 0.0f
        },
        kernelInfo.GetQuantizationScale(),
        kernelInfo.GetQuantizationOffset());

    std::vector<B> biasData =
        QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());

    // Expected output, one row of seven values per output channel
    // (written as the per-tap partial sums for readability).
    std::vector<T> outputData = QuantizedVector<T>(
        {
            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
        },
        outputInfo.GetQuantizationScale(),
        outputInfo.GetQuantizationOffset());

    // Data will be copied from outputHandle after execution.
    std::vector<T> actualOutput(outputInfo.GetNumElements());

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
                  biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
                  1, outputSize);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(kernelInfo);
    armnn::ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelInfo, weightsHandle.get());
    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    // The 1D convolution runs along the height axis: stride/pad on Y, none on X.
    data.m_Parameters.m_StrideX = 1;
    data.m_Parameters.m_StrideY = stride;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = padSize;
    data.m_Parameters.m_PadBottom = padSize;
    data.m_Parameters.m_BiasEnabled = biasEnabled;

    if (biasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
        AddInputToWorkload(data, info, biasInfo, biasHandle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                                data,
                                                                                info);
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (biasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), biasData.data());
    }

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernelData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputInfo.GetShape());
}
644
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    armnn::DataLayout dataLayout)
{
    // Convolves a single-batch, 1-channel 3x4 NHWC image with one 1-channel
    // 3x3 kernel. No bias is applied by this variant.
    armnn::IgnoreUnused(biasEnabled); // Bias is deliberately not exercised here.

    // Input: 1 batch, 3x4 spatial, 1 channel (NHWC).
    armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType);
    std::vector<T> input =
    {
        1, 5, 2, 3,
        8, 7, 3, 6,
        3, 3, 9, 1
    };

    // Weights: a single 1-channel 3x3 kernel (NHWC).
    armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
    std::vector<T> kernel =
    {
        4, 5, 6,
        0, 0, 0,
        3, 2, 1
    };

    // Expected output: 1 batch of a 1-channel 3x4 image — same spatial size as
    // the input (presumably via the impl's default padding; confirm against
    // SimpleConvolution2dNhwcTestImpl's defaults).
    armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType);
    // NOTE(review): declared as std::vector<float> rather than std::vector<T>;
    // relies on the impl accepting it for every instantiated T — confirm.
    const std::vector<float> outputData =
    {
        23, 41, 33, 21,
        44, 65, 76, 52,
        82, 85, 79, 42
    };

    return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        std::vector<T>(), // No bias data.
        outputData,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        dataLayout,
        qScale,
        qOffset);
}
699
700template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
701LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
702 armnn::IWorkloadFactory& workloadFactory,
703 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100704 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100705 float qScale,
706 int32_t qOffset,
707 bool biasEnabled,
708 const armnn::DataLayout& dataLayout)
709{
Jan Eilers8eb25602020-03-09 12:13:48 +0000710 armnn::IgnoreUnused(biasEnabled);
Derek Lambertic374ff02019-12-10 21:57:35 +0000711
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100712 // Input is a single-batch, 1 channel, 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100713 armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType);
714 std::vector<T> input =
715 {
716 1, 5, 2, 3, 5,
717 8, 7, 3, 6, 3,
718 3, 3, 9, 1, 9,
719 4, 1, 8, 1, 3,
720 6, 8, 1, 9, 2
721 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100722
723 // Use a 3x3 kernel.
Sadik Armagan483c8112021-06-01 09:24:52 +0100724 armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
725 std::vector<T> kernel =
726 {
727 4, 5, 6,
728 0, 0, 0,
729 3, 2, 1
730 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100731
732 // Expected output is a single-batch, 1 channel, 3x3 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100733 armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType);
734 std::vector<T> outputData =
735 {
736 23, 33, 24,
737 91, 99, 48,
738 26, 50, 19
739 };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100740
741 uint32_t padLeft = 1;
742 uint32_t padTop = 1;
743 uint32_t padRight = 1;
744 uint32_t padBottom = 1;
745 uint32_t strideX = 2;
746 uint32_t strideY = 2;
747
748 return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
749 workloadFactory,
750 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100751 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100752 input,
753 kernel,
Sadik Armagan483c8112021-06-01 09:24:52 +0100754 std::vector<T>(),
755 outputData,
756 inputDesc.GetShape(),
757 kernelDesc.GetShape(),
758 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100759 dataLayout,
760 qScale,
761 qOffset,
762 padLeft,
763 padTop,
764 padRight,
765 padBottom,
766 strideX,
767 strideY);
768}
769
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Convolves the file-common 3-channel 8x16 image with two 3-channel 3x5
    // kernels, optionally adding the common 2-element bias.

    // Input: 1 batch, 3 channels, 8 rows x 16 columns (NCHW).
    armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
    std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);

    // Weights: 2 output channels, each with 3 input-channel 5x3 filters.
    armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType);
    std::vector<T> kernel = QuantizedVector<T>({
            // Output channel 0, input channel 0.
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Output channel 0, input channel 1 (zeroed out).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 0, input channel 2.
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,


            // Output channel 1, input channel 0 (zeroed out).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            // Output channel 1, input channel 1.
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            // Output channel 1, input channel 2 (zeroed out).
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0
        },
        qScale, qOffset);

    // Expected output: 1 batch of a 2-channel 4x14 image ({ 1, 2, 4, 14 }).
    armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType);
    std::vector<T> expectedOutput = QuantizedVector<T>({
        -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
        -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
        -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
        -23.5f, -23.5f, -23.5f,
        -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
        -23.5f, -23.5f, -23.5f,

        5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        },
        qScale, qOffset);

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        qScale,
        qOffset,
        layout);
}
858
859template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
860 typename T = armnn::ResolveType<ArmnnType>>
861LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
862 armnn::IWorkloadFactory& workloadFactory,
863 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100864 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100865 float qScale,
866 int32_t qOffset,
867 bool biasEnabled,
868 const armnn::DataLayout layout)
869{
870 // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
871
872 // Use common single-batch 3-channel 16x8 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100873 armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
874 std::vector<unsigned int> inputShape = { 1, 3, 8, 16 };
875 std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100876
877 // Use a 2-element batch of 3-channel 3x3 kernels.
Sadik Armagan483c8112021-06-01 09:24:52 +0100878 armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType);
879 std::vector<T> kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100880 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100881 1, -1, 1,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100882 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100883
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100884 0, 0, 0,
885 0, 0, 0,
886 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100887
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100888 2, 2, 2,
889 2, 2, 2,
890 2, 2, 2,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100891
892
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100893 0, 0, 0,
894 0, 0, 0,
895 0, 0, 0,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100896
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100897 1, 1, 1,
898 1, 1, 1,
899 1, 1, 1,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100900
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100901 0, 0, 0,
902 0, 0, 0,
903 0, 0, 0
904 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100905 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100906
907 // Expected output is 1 batch of a 2-channel 14x6 image.
Sadik Armagan483c8112021-06-01 09:24:52 +0100908 armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType);
909 std::vector<T> expectedOutput = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100910 -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
911 -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
912 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
913 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
914 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
915 -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
916
917 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
918 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
919 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
920 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
921 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
922 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100923 },
Sadik Armagan483c8112021-06-01 09:24:52 +0100924 qScale, qOffset);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100925
926 return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
927 workloadFactory,
928 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +0100929 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100930 input,
931 kernel,
932 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
933 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +0100934 inputDesc.GetShape(),
935 kernelDesc.GetShape(),
936 outputDesc.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +0100937 qScale,
938 qOffset,
939 layout);
940}
941
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Exercises asymmetric padding where the padding on some edges (right = 3,
    // bottom = 4) exceeds half the 2x2 kernel size. No bias is applied.

    // Use a single-batch 1-channel 3x3 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType);
    std::vector<T> input =
        QuantizedVector<T>({
            11,21,31,
            12,22,32,
            13,23,33
        },
        qScale, qOffset);

    // Use 1 batch of a 1-channel 2x2 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType);
    std::vector<T> kernel =
        QuantizedVector<T>({
            -11,-21,
            -12,-22,
        },
        qScale, qOffset);

// Expected output is 1 batch of a 1-channel 8x6 (rows x columns) image.
// Manually calculated like this:
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
    armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType);
    std::vector<T> expectedOutput =
        QuantizedVector<T>({
               0,    0,    0,    0,    0,    0,
            -242, -594, -934, -372,    0,    0,
            -495, -1190, -1850, -725,    0,    0,
            -538, -1256, -1916, -748,    0,    0,
            -273, -626, -946, -363,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0,
               0,    0,    0,    0,    0,    0
        },
        qScale, qOffset);

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale), // Bias disabled.
        expectedOutput,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        2, // Padding top.
        3, // Padding right.
        4); // Padding bottom.
}
1013
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout,
    float qScale,
    int32_t qOffset)
{
    // Exercises asymmetric padding (left/top = 1, right/bottom = 2) with a 4x4
    // kernel over a 5x5 input, giving a same-sized 5x5 output. No bias.

    // Use a single-batch 1-channel 5x5 image as input.
    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
    std::vector<T> input =
        QuantizedVector<T>({
            11,21,31,41,51,
            12,22,32,42,52,
            13,23,33,43,53,
            14,24,34,44,54,
            15,25,35,45,55,
        }, qScale, qOffset);

    // Use 1 batch of a 1-channel 4x4 kernel.
    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
    std::vector<T> kernel =
        QuantizedVector<T>({
            -11,-21,-31,-41,
            -12,-22,-32,-42,
            -13,-23,-33,-43,
            -14,-24,-34,-44,
        },
        qScale, qOffset);

    // Expected output is 1 batch of a 1-channel 5x5 image:
    // (5 - 4 + 1 + 2) / 1 + 1 = 5 in each dimension.
    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
    std::vector<T> expectedOutput =
        QuantizedVector<T>({
            -7140, -10580, -13940, -9300, -5230,
            -9590, -14120, -18520, -12290, -6860,
            -9980, -14560, -18960, -12560, -7000,
            -7518, -10904, -14144, -9318, -5152,
            -5032, -7256, -9376, -6142, -3368,
        },
        qScale, qOffset);

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBias2<ArmnnBType>(false, qScale * qScale), // Bias disabled.
        expectedOutput,
        inputDesc.GetShape(),
        kernelDesc.GetShape(),
        outputDesc.GetShape(),
        qScale,
        qOffset,
        layout,
        1, // Padding left.
        1, // Padding top.
        2, // Padding right.
        2); // Padding bottom.
}
1077
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<float>& inputNoQuantizedValues,
    armnn::TensorInfo& inputTensorInfo,
    const std::vector<float>& kernelNoQuantizedValues,
    armnn::TensorInfo& kernelTensorInfo,
    const std::vector<float>& outputExpectedNoQuantizedValues,
    armnn::TensorInfo& outputTensorInfo,
    uint32_t dilationX,
    uint32_t dilationY,
    armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    bool biasEnabled = false
)
{
    // Shared driver for the dilated-convolution tests below: picks per-datatype
    // quantization parameters, stamps them onto the caller-supplied TensorInfos,
    // quantizes the float test data and delegates to SimpleConvolution2dTestImpl.

    // Choose quantization scale/offset based on the instantiated data type.
    float qScale;
    int32_t qOffset;
    switch (ArmnnType)
    {
        case armnn::DataType::QAsymmU8:
        case armnn::DataType::QAsymmS8:
        {
            qScale = 0.1f;
            qOffset = 128;
            break;
        }
        case armnn::DataType::QSymmS16:
        {
            qScale = 0.1f;
            qOffset = 0;
            break;
        }
        case armnn::DataType::Float32:
        default:
        {
            // Non-quantized types: scale/offset left at zero (presumably
            // ignored downstream for float data — confirm in the impl).
            qScale = 0.f;
            qOffset = 0;
            break;
        }
    }

    // Propagate the chosen quantization parameters to all three tensor infos.
    inputTensorInfo.SetQuantizationScale(qScale);
    inputTensorInfo.SetQuantizationOffset(qOffset);
    kernelTensorInfo.SetQuantizationScale(qScale);
    kernelTensorInfo.SetQuantizationOffset(qOffset);
    outputTensorInfo.SetQuantizationScale(qScale);
    outputTensorInfo.SetQuantizationOffset(qOffset);

    // Quantize the raw float test vectors into the target representation T.
    auto input = QuantizedVector<T>(inputNoQuantizedValues,
                                    inputTensorInfo.GetQuantizationScale(),
                                    inputTensorInfo.GetQuantizationOffset());
    auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
                                     kernelTensorInfo.GetQuantizationScale(),
                                     kernelTensorInfo.GetQuantizationOffset());
    auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
                                             outputTensorInfo.GetQuantizationScale(),
                                             outputTensorInfo.GetQuantizationOffset());

    return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        input,
        kernel,
        GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
        expectedOutput,
        inputTensorInfo.GetShape(),
        kernelTensorInfo.GetShape(),
        outputTensorInfo.GetShape(),
        qScale,
        qOffset,
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        strideX,
        strideY,
        dilationX,
        dilationY);
}
1167
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 3x3 kernel with dilation 3x3 over a 10x10 single-channel input.
    // The input is all-zero except for a 3x3 block of ones at rows 3-5,
    // columns 4-6, so each output element sums the kernel taps that land
    // inside that block.
    armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // Single 1-channel 3x3 kernel with taps 1..9.
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        6., 5., 5., 5.,
        3., 2., 2., 2.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1225
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // Two-input-channel variant of the 3x3 dilation-3x3 test: both 10x10 input
    // planes carry the same 3x3 block of ones, so the expected output is
    // exactly double that of the single-channel test.
    armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        // Input channel 0.
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,

        // Input channel 1 (identical to channel 0).
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    };

    // One output channel with two 3x3 input-channel filters (taps 1..9 each).
    armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2, 3,
        4, 5, 6,
        7, 8, 9,

        1, 2, 3,
        4, 5, 6,
        7, 8, 9
    };

    // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
    // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        12., 10., 10., 10.,
        6., 4., 4., 4.
    };

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        3, // dilationX
        3, // dilationY
        layout,
        biasEnabled);
}
1298
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    // 2x2 kernel with 2x2 dilation, 1-pixel padding on every edge and a 3x3
    // stride over an all-ones 10x10 input.
    armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
    std::vector<float> inputNoQuantizedValues =
    {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1
    };

    // Single 1-channel 2x2 kernel with taps 1..4 (sum 10).
    armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    std::vector<float> kernelNoQuantizedValues =
    {
        1, 2,
        3, 4
    };

    // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
    // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x1 ) / 3 + 1 )
    // where, dilation size = d = 2; effective kernel size = K = 3; input size = I = 10;
    // padding per edge = P = 1; stride = S = 3.
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
    std::vector<float> outputExpectedNoQuantizedValues =
    {
        4, 7, 7, 3,
        6, 10, 10, 4,
        6, 10, 10, 4,
        2, 3, 3, 1
    };
    // One pixel of padding on every edge.
    uint32_t padLeft = 1;
    uint32_t padTop = 1;
    uint32_t padRight = 1;
    uint32_t padBottom = 1;

    return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputNoQuantizedValues,
        inputTensorInfo,
        kernelNoQuantizedValues,
        kernelTensorInfo,
        outputExpectedNoQuantizedValues,
        outputTensorInfo,
        2, // dilationX
        2, // dilationY
        layout,
        padLeft,
        padTop,
        padRight,
        padBottom,
        3, // strideX
        3, // strideY
        biasEnabled
        );
}
1367
// Cross-backend comparison test: runs an identical Convolution2d workload on the
// backend under test (workloadFactory) and on a reference backend (refWorkloadFactory)
// using the same deterministic random input, weights and bias, and returns both
// outputs so the framework can compare them element-wise.
// Fixed configuration: NCHW, batch 5, 3-channel 8x16 input, 2 output channels,
// 3x3 kernel, stride (X=2, Y=3), padding 1 on every edge, bias enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

    unsigned int outputNum = inputNum;
    unsigned int outputChannels = 2;
    // Standard conv output-size formula; integer division gives the floor.
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

    // Shapes are NCHW; kernel is [outCh, inCh, kH, kW].
    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
    unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
    unsigned int biasShape[] = {outputChannels};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
    kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
    biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Fixed seeds so both backends see identical, reproducible data.
    auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
    auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
    auto bias = MakeRandomTensor<T>(biasDesc, 1028);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());

    // Tensor handles for the backend under test.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Convolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    // Call order fixes the workload input slots: 0 = input, 1 = weights, 2 = bias.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
    // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
    // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
    // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernel.data());
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
    AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;

    // Tensor handles for the reference backend.
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandleRef = refTensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandleRef = refTensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // Copy the descriptor (parameters, weights/bias pointers) and rebind the
    // tensor handles to the reference backend's handles, slot by slot.
    armnn::Convolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadInput(refData, refInfo, 1, kernelDesc, weightsHandleRef.get());
    SetWorkloadInput(refData, refInfo, 2, biasDesc, biasHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
            = refWorkloadFactory.CreateWorkload(armnn::LayerType::Convolution2d, refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    weightsHandleRef->Allocate();
    biasHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());
    CopyDataToITensorHandle(weightsHandleRef.get(), kernel.data());
    CopyDataToITensorHandle(biasHandleRef.get(), bias.data());

    // ExecuteWorkload handles the backend-under-test run (including any
    // post-allocation configuration); the reference run is driven manually.
    ExecuteWorkload(*workload, memoryManager);

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    // "Expected" here is the reference backend's output, not a precomputed vector.
    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
1494
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001495LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test(
1496 armnn::IWorkloadFactory& workloadFactory,
1497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001498 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001499 bool biasEnabled,
1500 const armnn::DataLayout& dataLayout)
1501{
1502 // BFloat16 input and weight, Float32 output
1503 armnn::IgnoreUnused(biasEnabled);
1504
1505 // Input is a single-batch, 1 channel, 5x5 image.
Sadik Armagan483c8112021-06-01 09:24:52 +01001506 armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16);
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001507
1508 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1509 {
1510 10.0367984f, // 10.0625
1511 2.0380895f, // 2.03125
1512 15.0420157f, // 15.0625
1513 22.0675631f, // 22.125
1514 8.0938920f, // 8.125
1515 5.0476106f, // 5.0625
1516 80.1035490f, // 80
1517 100.1260370f, // 100
1518 55.0461647f, // 55
1519 120.0883828f, // 120
1520 9.1159540f, // 9.125
1521 90.0498519f, // 90
1522 200.0104630f, // 200
1523 30.0154114f, // 30
1524 75.00137681f, // 75
1525 30.0344238f, // 30
1526 25.0356445f, // 25
1527 130.0495605f, // 130
1528 60.0683594f, // 60
1529 35.0991211f, // 35
1530 8.0461426f, // 8.0625
1531 12.0996094f, // 12.125
1532 98.1269530f, // 98
1533 125.0393066f, // 125
1534 5.103516f // 5.0937
1535 },
1536 1.0f, 0);
1537
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001538 // Use a 3x3 kernel.
1539 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1540
1541 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1542 {
1543 -0.126184f, // -0.125977
1544 -0.150468f, // -0.150391
1545 -0.101412f, // -0.101562
1546 -0.0586369f,// -0.0585938
1547 -0.0865864f,// -0.0864258
1548 -0.0435089f,// -0.043457
1549 0.0347555f, // 0.034668
1550 0.0323111f, // 0.0322266
1551 0.0385381f // 0.0385742
1552 },
1553 1.0f, 0);
1554
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001555 // Expected output is a single-batch, 1 channel, 3x3 image.
Sadik Armagan483c8112021-06-01 09:24:52 +01001556 armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32);
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001557
1558 // Expected output (with results if calculated as FP32 in the comments)
1559 const std::vector<float> outputData =
1560 {
1561 2.296875f, // 2.29240716
1562 5.75f, // 5.75851926
1563 3.78125f, // 3.79855026
1564 -11.625f, // -11.65498118
1565 -47.25f, // -47.27316893
1566 -30.0f, // -30.04771684
1567 -8.25f, // -8.28126168
1568 -43.5f, // -43.46531337
1569 -20.625f // -20.63477281
1570 };
1571
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001572 uint32_t padLeft = 1;
1573 uint32_t padTop = 1;
1574 uint32_t padRight = 1;
1575 uint32_t padBottom = 1;
1576 uint32_t strideX = 2;
1577 uint32_t strideY = 2;
1578
1579 return SimpleConvolution2dNhwcTestImpl
1580 <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
1581 workloadFactory,
1582 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001583 tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001584 inputValues,
1585 kernelValues,
1586 std::vector<float>(),
1587 outputData,
1588 inputDesc.GetShape(),
1589 kernelDesc.GetShape(),
1590 outputDesc.GetShape(),
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001591 dataLayout,
1592 1.0f,
1593 0,
1594 padLeft,
1595 padTop,
1596 padRight,
1597 padBottom,
1598 strideX,
1599 strideY);
1600}
1601
1602LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest(
1603 armnn::IWorkloadFactory& workloadFactory,
1604 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001605 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001606 bool biasEnabled,
1607 const armnn::DataLayout& dataLayout)
1608{
1609 // BFloat16 input and weight, Float32 output
1610 armnn::IgnoreUnused(biasEnabled);
1611
1612 // Input is a single-batch, 1 channel, 5x5 image.
1613 armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
1614
1615 std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1616 {
1617 0.0367984f, // 0.0368652
1618 0.0380895f, // 0.0380859
1619 0.0420157f, // 0.0419922
1620 0.0675631f, // 0.0673828
1621 0.0938920f, // 0.09375
1622 0.0476106f, // 0.0476074
1623 0.1035490f, // 0.103516
1624 0.1260370f, // 0.125977
1625 0.0461647f, // 0.0461426
1626 0.0883828f, // 0.0883789
1627 0.1159540f, // 0.115723
1628 0.0498519f, // 0.0498047
1629 0.0104630f, // 0.010437
1630 0.0154114f, // 0.0154419
1631 0.00137681f, // 0.00137329
1632 0.0344238f, // 0.0344616
1633 0.0356445f, // 0.0355693
1634 0.0495605f, // 0.0495018
1635 0.0683594f, // 0.0683308
1636 0.0991211f, // 0.0988837
1637 0.0461426f, // 0.0461838
1638 0.0996094f, // 0.0997546
1639 0.1269530f, // 0.127099
1640 0.0393066f, // 0.0392791
1641 0.103516f // 0.103641
1642 },
1643 1.0f, 0);
1644
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001645 // Use a 3x3 kernel.
1646 armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1647
1648 std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1649 {
1650 -0.126184f, // -0.125977
1651 -0.150468f, // -0.150391
1652 -0.101412f, // -0.101562
1653 -0.0586369f,// -0.0585938
1654 -0.0865864f,// -0.0864258
1655 -0.0435089f,// -0.043457
1656 0.0347555f, // 0.034668
1657 0.0323111f, // 0.0322266
1658 0.0385381f // 0.0385742
1659 },
1660 1.0f, 0);
1661
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001662 // Expected output is a single-batch, 1 channel, 3x3 image.
1663 armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
1664
1665 // Expected output (with results if calculated as FP32 in the comments)
1666 const std::vector<float> outputData =
1667 {
1668 0.000686645508f, // 0.000685
1669 0.000640869141f, // 0.000639
1670 -0.00759887695f, // -0.007631
1671 -0.02734375f, // -0.027388
1672 -0.0356445312f, // -0.035737
1673 -0.0145874023f, // -0.014568
1674 -0.0170898438f, // -0.017124
1675 -0.0373535156f, // -0.037431
1676 -0.0346679688f // -0.034808
1677 };
1678
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001679 uint32_t padLeft = 1;
1680 uint32_t padTop = 1;
1681 uint32_t padRight = 1;
1682 uint32_t padBottom = 1;
1683 uint32_t strideX = 2;
1684 uint32_t strideY = 2;
1685
1686 return SimpleConvolution2dNhwcTestImpl
1687 <armnn::DataType::BFloat16, armnn::DataType::Float32, armnn::BFloat16, float, armnn::DataType::Float32, float>(
1688 workloadFactory,
1689 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001690 tensorHandleFactory,
Sadik Armagan483c8112021-06-01 09:24:52 +01001691 inputValues,
1692 kernelValues,
1693 std::vector<float>(),
1694 outputData,
1695 inputDesc.GetShape(),
1696 kernelDesc.GetShape(),
1697 outputDesc.GetShape(),
Narumol Prangnawarate8cddeb2020-04-01 16:51:23 +01001698 dataLayout,
1699 1.0f,
1700 0,
1701 padLeft,
1702 padTop,
1703 padRight,
1704 padBottom,
1705 strideX,
1706 strideY);
1707}
1708
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001709//
1710// DepthwiseConvolution2d implementations
1711//
1712
// Builds and executes a single DepthwiseConvolution2d workload on the given backend
// with caller-supplied input, kernel, optional bias and expected output, and returns
// actual-vs-expected results for comparison.
// Shapes are given in NCHW order; kernelShape is interpreted as [1, kH, kW, channels]
// (see the reads of kernelShape[1..3] below). The expected output is the caller's
// reference WITHOUT bias; bias (if non-empty) is folded into it here via ApplyBias.
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& kernel,
    const std::vector<B>& bias,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& kernelShape,
    const armnn::TensorShape& outputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1)
{
    // Unpack the NCHW-ordered shapes into named dimensions.
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);

    // If a bias is used, its size must equal the number of output channels.
    bool biasEnabled = bias.size() > 0;
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);

    // Creates the tensors.
    armnn::TensorInfo inputTensorInfo =
            armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
            armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    // Depthwise weights are always described as [1, H, W, I*M] regardless of layout.
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale (here both qScale); offset must be 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // Construct the input data.
    std::vector<T> inputData;
    inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);

    // At this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    // NOTE(review): the copy length uses outputChannels while kernelDesc was built
    // from kernelChannels (kernelShape[3]); these agree only when the depth
    // multiplier makes I*M == outputChannels — confirm for multiplier > 1 cases.
    std::vector<T> kernelData;
    kernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
    // Neon/CL backends expect the depthwise weights permuted to {0, 2, 3, 1} when
    // running NCHW, so both the data and the descriptor shape are rewritten here.
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernelData.size());
            kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
            kernelData = tmp;
        }
    }

    // Construct the output data, with bias applied, as appropriate.
    std::vector<T> outputData;
    outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
    if (biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // At this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    // Only created below if a bias is actually used.
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);

    // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
    // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
    //    Needed in Neon and Cl Workload when permuting. Backend TensorHandle in (2) below will not work.
    // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
    //    Cannot PolymorphicDowncast from ScopedTensorHandle->RefTensorHandle.
    //    Need to PolymorphicDowncast from ITensorHandle->RefTensorHandle.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor

    // Call order fixes the workload input slots: 0 = input, 1 = weights (, 2 = bias below).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::ScopedTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload
            = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
1883
1884template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
1885LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
1886 armnn::IWorkloadFactory& workloadFactory,
1887 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01001888 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001889 float qScale,
1890 int32_t qOffset,
1891 bool biasEnabled,
1892 const armnn::DataLayout layout)
1893{
1894 using B = armnn::ResolveType<ArmnnBType>;
1895
1896 unsigned int inputHeight = 3;
1897 unsigned int inputWidth = 3;
1898 unsigned int inputChannels = 2;
1899 unsigned int inputNum = 1;
1900
1901 unsigned int kernelHeight = 3;
1902 unsigned int kernelWidth = 3;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001903
1904 unsigned int outputHeight = 1;
1905 unsigned int outputWidth = 1;
Jan Eilers53ef7952021-06-02 12:01:25 +01001906 unsigned int outputChannels = inputChannels;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001907 unsigned int outputNum = inputNum;
1908
1909 armnn::TensorInfo inputTensorInfo =
1910 armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1911 armnn::TensorInfo outputTensorInfo =
1912 armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
Jan Eilers53ef7952021-06-02 12:01:25 +01001913 armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001914 ArmnnType);
1915 armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
1916
1917 // Set quantization parameters if the requested type is a quantized type.
1918 if(armnn::IsQuantizedType<T>())
1919 {
1920 inputTensorInfo.SetQuantizationScale(qScale);
1921 inputTensorInfo.SetQuantizationOffset(qOffset);
1922 outputTensorInfo.SetQuantizationScale(qScale);
1923 outputTensorInfo.SetQuantizationOffset(qOffset);
1924 kernelDesc.SetQuantizationScale(qScale);
1925 kernelDesc.SetQuantizationOffset(qOffset);
1926 biasDesc.SetQuantizationScale(qScale*qScale);
1927 biasDesc.SetQuantizationOffset(0);
1928 }
1929 std::vector<T> inputData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001930 QuantizedVector<T>({
1931 1.f, 2.f, 1.f,
1932 2.f, 1.f, 2.f,
1933 1.f, 2.f, 1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001934
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001935 1.f, 2.f, 1.f,
1936 2.f, 1.f, 2.f,
1937 1.f, 2.f, 1.f,
1938 },
1939 inputTensorInfo.GetQuantizationScale(),
1940 inputTensorInfo.GetQuantizationOffset()));
1941
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001942 // at this point if we require it permute the input data
1943 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1944 if (layout == armnn::DataLayout::NHWC)
1945 {
1946 std::vector<T> tmp(inputData.size());
1947 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1948 inputData = tmp;
1949 }
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001950
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001951 std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1952 biasDesc.GetQuantizationScale(),
1953 biasDesc.GetQuantizationOffset()));
1954
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001955 std::vector<T> kernelData = std::vector<T>(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001956 QuantizedVector<T>({
1957 1.f, 0.f, 1.f,
1958 0.f, 0.f, 0.f,
1959 -1.f, 0.f, -1.f,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001960
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001961 1.f, 0.f, 1.f,
1962 0.f, 0.f, 0.f,
1963 -1.f, 0.f, -1.f,
1964 },
1965 kernelDesc.GetQuantizationScale(),
1966 kernelDesc.GetQuantizationOffset()));
1967
Cathal Corbett4b19d222022-05-11 20:12:17 +01001968 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
1969 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
1970 {
1971 if (layout == armnn::DataLayout::NCHW)
1972 {
1973 std::vector<T> tmp(kernelData.size());
1974 kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
1975 armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
1976 kernelData = tmp;
1977 }
1978 }
1979
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001980 // Manually calculated.
1981 std::vector<T> outputImage(
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001982 QuantizedVector<T>({ 0.f, 0.f },
1983 outputTensorInfo.GetQuantizationScale(),
1984 outputTensorInfo.GetQuantizationOffset())
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001985 );
1986
1987 // Optionally apply bias to output image.
1988 if(biasEnabled)
1989 {
1990 ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1991 biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1992 outputWidth, outputHeight);
1993 }
1994
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001995 if (layout == armnn::DataLayout::NHWC)
1996 {
1997 std::vector<T> tmp(outputImage.size());
1998 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
1999 outputImage = tmp;
2000 }
2001
Sadik Armagan483c8112021-06-01 09:24:52 +01002002 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
Keith Davisf500d6c2020-08-31 08:32:55 +01002003
2004 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01002005 std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
2006 std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
Keith Davisf500d6c2020-08-31 08:32:55 +01002007 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2008
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002009 armnn::DepthwiseConvolution2dQueueDescriptor data;
2010 armnn::WorkloadInfo info;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002011
Cathal Corbett06902652022-04-14 17:55:11 +01002012 armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2013 // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
2014 // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
2015 // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
2016 // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
2017 AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
2018 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002019
2020 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
Cathal Corbett06902652022-04-14 17:55:11 +01002021 AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002022 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2023
Cathal Corbett06902652022-04-14 17:55:11 +01002024 armnn::ScopedTensorHandle biasTensor(biasDesc);
2025 if (biasEnabled)
2026 {
2027 AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
2028
2029 biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
2030 AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasV.data());
2031 AddInputToWorkload(data, info, biasDesc, biasHandle.get());
2032 }
2033
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002034 data.m_Weight = &weightsTensor;
2035 data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
2036 data.m_Parameters.m_StrideX = 1;
2037 data.m_Parameters.m_StrideY = 1;
2038 data.m_Parameters.m_PadLeft = 0;
2039 data.m_Parameters.m_PadRight = 0;
2040 data.m_Parameters.m_PadTop = 0;
2041 data.m_Parameters.m_PadBottom = 0;
2042 data.m_Parameters.m_BiasEnabled = biasEnabled;
2043 data.m_Parameters.m_DataLayout = layout;
2044
Teresa Charlin611c7fb2022-01-07 09:47:29 +00002045 std::unique_ptr<armnn::IWorkload> workload
2046 = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
Cathal Corbett06902652022-04-14 17:55:11 +01002047
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002048 inputHandle->Allocate();
2049 outputHandle->Allocate();
2050
Sadik Armagan483c8112021-06-01 09:24:52 +01002051 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002052
2053 ExecuteWorkload(*workload, memoryManager);
2054
Sadik Armagan483c8112021-06-01 09:24:52 +01002055 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002056
Sadik Armagan483c8112021-06-01 09:24:52 +01002057 return LayerTestResult<T, 4>(actualOutput,
2058 outputImage,
2059 outputHandle->GetShape(),
2060 outputTensorInfo.GetShape());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002061}
2062
//
// Depthwise convolution over a fixed 1x2x8x16 (NCHW) input with a depth
// multiplier of 2 (=> 4 output channels), one 5x3 filter per output channel,
// strides {X=2, Y=1} and vertical padding {top=1, bottom=1}. The backend's
// result is compared against a manually calculated expected image.
//
// qScale/qOffset - quantization parameters, applied only when T is a
//                  quantized type.
// biasEnabled    - when true, the {0, 2, 1, -1} per-channel bias is folded
//                  into the expected output via ApplyBias().
// layout         - NCHW or NHWC; all reference data below is authored in
//                  NCHW and permuted on demand.
//
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset,
    bool biasEnabled,
    const armnn::DataLayout layout)
{
    using B = armnn::ResolveType<ArmnnBType>;

    unsigned int depthMultiplier = 2;

    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 2;
    unsigned int inputBatchSize = 1;

    unsigned int kernelHeight = 5;
    unsigned int kernelWidth = 3;

    // Height: valid convolution (8 - 5 + 1) plus 2 rows from PadTop/PadBottom = 1.
    unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
    // Width: valid convolution (16 - 3 + 1) halved by StrideX = 2.
    unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
    unsigned int outputChannels = inputChannels * depthMultiplier;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
    // Depthwise weights use the [1, H, W, I*M] layout regardless of the data layout.
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
                                 ArmnnType);
    armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale (both qScale here), offset 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    // NOTE: originalInputData is in NCHW format
    // Channel 0: rows of 0.5 with one all-zero row; channel 1: a single
    // vertical line of 1.0 at column 2.
    std::vector<T> originalInputData = std::vector<T>(
        QuantizedVector<T>({
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
        },
        inputTensorInfo.GetQuantizationScale(),
        inputTensorInfo.GetQuantizationOffset()));

    std::vector<T> inputData = originalInputData;
    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
                            originalInputData.data(), inputData.data(), sizeof(T));
    }

    // One bias value per output channel.
    std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
                                              biasDesc.GetQuantizationScale(),
                                              biasDesc.GetQuantizationOffset());

    // Four 5x3 filters, one per output channel: channels 0/1 are applied to
    // input channel 0, channels 2/3 to input channel 1 (depth multiplier 2).
    std::vector<T> kernelData = std::vector<T>(
        QuantizedVector<T>({
            1, 1, 1,
            1, -1, 1,
            1, 1, 1,
            1, 1, 1,
            1, 1, 1,

            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,
            2, 2, 2,

            0, 0, 0,
            0, -1, 0,
            0, 0, 0,
            0, 0, 0,
            0, 0, 0,

            0, 0, 0,
            0, 0, 0,
            0, 1, 0,
            0, 0, 0,
            0, 0, 0
        },
        kernelDesc.GetQuantizationScale(),
        kernelDesc.GetQuantizationOffset()));

    // The Neon/CL backends require the depthwise weights re-laid-out when the
    // data layout is NCHW; presumably {0, 2, 3, 1} produces the layout those
    // backends expect - TODO confirm against the backend workload code.
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernelData.size());
            kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
            kernelData = tmp;
        }
    }

    // Manually calculated.
    std::vector<T> originalOutputImage = std::vector<T>(
        QuantizedVector<T>({
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
            5, 5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5,
            5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5, 5, 5, 5, 5, 5, 5,
            2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5,
            4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 6, 6, 6, 6, 6, 6, 6,
            6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
            1, 3, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
            2, 4, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
            3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
            3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0
        },
        outputTensorInfo.GetQuantizationScale(),
        outputTensorInfo.GetQuantizationOffset()));

    // Optionally apply bias to output image.
    if(biasEnabled)
    {
        ApplyBias(originalOutputImage,
                  outputTensorInfo.GetQuantizationScale(),
                  outputTensorInfo.GetQuantizationOffset(),
                  biasV,
                  biasDesc.GetQuantizationScale(),
                  biasDesc.GetQuantizationOffset(),
                  outputWidth,
                  outputHeight);
    }

    // The expected output is authored in NCHW too; permute it for NHWC runs.
    std::vector<T> outputImage = originalOutputImage;
    if (layout == armnn::DataLayout::NHWC)
    {
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
                            originalOutputImage.data(), outputImage.data(), sizeof(T));
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
    // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
    // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
    // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor

    // Registration order fixes the workload input slots: 0 = input,
    // 1 = weights (and 2 = bias, added below when enabled).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::ScopedTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());

        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasV.data());
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
    data.m_Parameters.m_StrideX = 2;
    data.m_Parameters.m_StrideY = 1;
    data.m_Parameters.m_PadLeft = 0;
    data.m_Parameters.m_PadRight = 0;
    data.m_Parameters.m_PadTop = 1;
    data.m_Parameters.m_PadBottom = 1;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputImage,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());

}
2288
//
// Generic depthwise convolution test helper: runs the given backend against
// caller-supplied input, weights, optional bias and expected output, and
// returns actual vs. expected results. The single source batch is duplicated
// into two identical batches before execution.
//
// originalInput/
// originalOutputExpected - one batch of NCHW data matching
//                          originalInputShape/originalOutputExpectedShape.
// originalKernel         - depthwise weights in [1, H, W, I*M] layout.
// bias                   - empty vector disables bias; otherwise one value
//                          per output channel.
// layout                 - NCHW (default) or NHWC; data is permuted on demand.
// pad*/stride*/dilation* - descriptor parameters, defaulting to the identity
//                          configuration (no padding, stride 1, dilation 1).
//
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
         typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& originalInput,
    const std::vector<T>& originalKernel,
    const std::vector<B>& bias,
    const std::vector<T>& originalOutputExpected,
    const armnn::TensorShape& originalInputShape,
    const armnn::TensorShape& originalKernelShape,
    const armnn::TensorShape& originalOutputExpectedShape,
    float qScale,
    int32_t qOffset,
    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
    uint32_t padLeft = 0,
    uint32_t padTop = 0,
    uint32_t padRight = 0,
    uint32_t padBottom = 0,
    uint32_t strideX = 1,
    uint32_t strideY = 1,
    uint32_t dilationX = 1,
    uint32_t dilationY = 1)
{
    // Input/output shapes are read as NCHW...
    unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
    unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
    unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
    unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);

    unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
    unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
    unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
    unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);

    // ...while the kernel shape is read as [1, H, W, I*M].
    unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
    unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
    unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);

    bool biasEnabled = bias.size() > 0;

    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
    ARMNN_ASSERT(inputNum == 1);
    ARMNN_ASSERT(outputNum == 1);

    // If a bias is used, its size must equal the number of output channels.
    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);


    // Note these tensors will use two (identical) batches.
    armnn::TensorInfo inputTensorInfo =
        armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
    armnn::TensorInfo outputTensorInfo =
        armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);

    // Kernel must be NCHW layout always, independently of the layout of the input and output for depthwise convolution.
    armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);

    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        kernelDesc.SetQuantizationScale(qScale);
        kernelDesc.SetQuantizationOffset(qOffset);
        // Bias scale is input scale * weight scale (both qScale here), offset 0.
        biasDesc.SetQuantizationScale(qScale*qScale);
        biasDesc.SetQuantizationOffset(0);
    }

    std::vector<T> kernelData;
    // NOTE(review): the element count uses outputChannels where kernelChannels
    // (the last dimension of [1, H, W, I*M]) looks intended; the two are equal
    // for the callers visible in this file - confirm before relying on them
    // ever differing.
    kernelData.assign(originalKernel.data(), originalKernel.data() + kernelHeight*kernelWidth*outputChannels);
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        // Presumably re-lays the weights out for the Neon/CL NCHW path -
        // TODO confirm against the backend workload implementations.
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernelData.size());
            kernelDesc.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(T));
            kernelData = tmp;
        }
    }

    // Construct input data: two identical copies of the single source batch.
    std::vector<T> input;
    input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
    std::vector<T> inputData;
    inputData.insert(inputData.end(), input.begin(), input.end());
    inputData.insert(inputData.end(), input.begin(), input.end());

    // at this point if we require it permute the input data
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;
    }

    std::vector<T> output;
    output.assign(originalOutputExpected.data(),
                  originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);

    // Apply bias to output data if it is enabled.
    if(biasEnabled)
    {
        std::vector<T> biasV;
        biasV.assign(bias.data(), bias.data() + outputChannels);
        ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
                  outputWidth, outputHeight);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // Construct expected output data: two identical batches, like the input.
    std::vector<T> outputData;
    outputData.insert(outputData.end(), output.begin(), output.end());
    outputData.insert(outputData.end(), output.begin(), output.end());

    // at this point if we require it permute the expected output
    if (layout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
        outputData = tmp;
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    armnn::ScopedTensorHandle weightsTensor(kernelDesc);
    // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
    // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
    // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
    // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data()); // required for QueueDescriptor
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data()); // required for ConstantTensor

    // Registration order fixes the workload input slots: 0 = input,
    // 1 = weights (and 2 = bias, added below when enabled).
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, kernelDesc, weightsHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::ScopedTensorHandle biasTensor(biasDesc);
    if (biasEnabled)
    {
        AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

        biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
        AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
        AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    }

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padLeft;
    data.m_Parameters.m_PadRight = padRight;
    data.m_Parameters.m_PadTop = padTop;
    data.m_Parameters.m_PadBottom = padBottom;
    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_DataLayout = layout;
    data.m_Parameters.m_DilationX = dilationX;
    data.m_Parameters.m_DilationY = dilationY;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
2481
2482template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2483 typename T = armnn::ResolveType<ArmnnType>>
2484LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
2485 armnn::IWorkloadFactory& workloadFactory,
2486 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002487 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002488 float qScale,
2489 int32_t qOffset,
2490 bool biasEnabled,
2491 const armnn::DataLayout layout)
2492{
2493 // Use a single-batch 2-channel 5x5 image as input.
2494 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002495 auto input = QuantizedVector<T>(
2496 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002497 0, 1, 2, 3, 4,
2498 5, 6, 7, 8, 9,
2499 10, 11, 12, 13, 14,
2500 15, 16, 17, 18, 19,
2501 20, 21, 22, 23, 24,
2502
2503 25, 26, 27, 28, 29,
2504 30, 31, 32, 33, 34,
2505 35, 36, 37, 38, 39,
2506 40, 41, 42, 43, 44,
2507 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002508 },
2509 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002510 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002511
2512 // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
Jan Eilers53ef7952021-06-02 12:01:25 +01002513 // Weights layout for depthwise: [1,H,W,I*M]
2514 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2515 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002516 32, 31, 30, 29,
2517 28, 27, 26, 25,
2518 24, 23, 22, 21,
2519 20, 19, 18, 17,
2520
2521 16, 15, 14, 13,
2522 12, 11, 10, 9,
2523 8, 7, 6, 5,
2524 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002525 },
2526 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002527 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002528
2529 // Expected output is 1 batch of a 2-channel 5x5 image.
2530 // Calculated using the python tensorflow library with strideX=1, strideY=1.
2531 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002532 auto expectedOutput = QuantizedVector<T>(
2533 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002534 396, 664, 820, 756, 602, 1016, 1608, 1880, 1652, 1268, 1976, 2968, 3240, 2732,
2535 2028, 2628, 3808, 4060, 3312, 2390, 2596, 3700, 3900, 3130, 2226, 2817, 4186,
2536 4330, 3609, 2651, 5414, 7864, 8120, 6626, 4780, 6314, 9144, 9400, 7646, 5500,
2537 6759, 9610, 9850, 7875, 5579, 5935, 8348, 8540, 6757, 4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002538 },
2539 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002540 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002541
2542 return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2543 workloadFactory,
2544 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002545 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002546 input,
2547 kernel,
2548 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2549 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002550 inputTensorInfo.GetShape(),
2551 kernelTensorInfo.GetShape(),
2552 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002553 qScale,
2554 qOffset,
2555 layout,
2556 1, // Padding left.
2557 1, // Padding top.
2558 2, // Padding right.
2559 2, // Padding bottom.
2560 1, // strideX
2561 1); // strideY
2562}
2563
2564template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2565 typename T = armnn::ResolveType<ArmnnType>>
2566LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
2567 armnn::IWorkloadFactory& workloadFactory,
2568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002569 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002570 float qScale,
2571 int32_t qOffset,
2572 bool biasEnabled)
2573{
2574 auto layout = armnn::DataLayout::NHWC;
2575
2576 armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002577 auto input = QuantizedVector<T>(
2578 {
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002579 0, 1, 2, 3, 4,
2580 5, 6, 7, 8, 9,
2581 10, 11, 12, 13, 14,
2582 15, 16, 17, 18, 19,
2583 20, 21, 22, 23, 24,
2584
2585 25, 26, 27, 28, 29,
2586 30, 31, 32, 33, 34,
2587 35, 36, 37, 38, 39,
2588 40, 41, 42, 43, 44,
2589 45, 46, 47, 48, 49
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002590 },
2591 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002592 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002593
Jan Eilers53ef7952021-06-02 12:01:25 +01002594 armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2595 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002596 32, 31, 30, 29,
2597 28, 27, 26, 25,
2598 24, 23, 22, 21,
2599 20, 19, 18, 17,
2600
2601 16, 15, 14, 13,
2602 12, 11, 10, 9,
2603 8, 7, 6, 5,
2604 4, 3, 2, 1
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002605 },
2606 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002607 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002608
2609 armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
Sadik Armagan483c8112021-06-01 09:24:52 +01002610 auto expectedOutput = QuantizedVector<T>(
2611 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002612 396,664,820,756,602,
2613 1016,1608,1880,1652,1268,
2614 1976,2968,3240,2732,2028,
2615 2628,3808,4060,3312,2390,
2616 2596,3700,3900,3130,2226,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002617
Jan Eilers53ef7952021-06-02 12:01:25 +01002618 2817,4186,4330,3609,2651,
2619 5414,7864,8120,6626,4780,
2620 6314,9144,9400,7646,5500,
2621 6759,9610,9850,7875,5579,
2622 5935,8348,8540,6757,4742
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002623 },
2624 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002625 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002626
2627 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2628 workloadFactory,
2629 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002630 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002631 input,
2632 kernel,
2633 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2634 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002635 inputTensorInfo.GetShape(),
2636 kernelTensorInfo.GetShape(),
2637 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002638 qScale,
2639 qOffset,
2640 layout,
2641 1, // Padding left.
2642 1, // Padding top.
2643 2, // Padding right.
2644 2, // Padding bottom.
2645 1, // strideX
2646 1); // strideY
2647}
2648
2649template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
2650 typename T = armnn::ResolveType<ArmnnType>>
2651LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
2652 armnn::IWorkloadFactory& workloadFactory,
2653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002654 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002655 float qScale,
2656 int32_t qOffset,
2657 bool biasEnabled)
2658{
2659 auto layout = armnn::DataLayout::NHWC;
2660
Sadik Armagan483c8112021-06-01 09:24:52 +01002661 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
2662 auto input = QuantizedVector<T>(
2663 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002664 0, 0, 0, 0, 0, 0, 0, 0, 0,
2665 0, 0, 0, 0, 0, 0, 0, 0, 0,
2666 0, 0, 0, 0, 0, 0, 0, 0, 0,
2667 0, 0, 0, 1, 1, 1, 0, 0, 0,
2668 0, 0, 0, 1, 1, 1, 0, 0, 0,
2669 0, 0, 0, 1, 1, 1, 0, 0, 0,
2670 0, 0, 0, 0, 0, 0, 0, 0, 0,
2671 0, 0, 0, 0, 0, 0, 0, 0, 0,
2672 0, 0, 0, 0, 0, 0, 0, 0, 0
2673 },
2674 inputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002675 inputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002676
Jan Eilers53ef7952021-06-02 12:01:25 +01002677 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
2678 auto kernel = QuantizedVector<T>({
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002679 1, 2, 3,
2680 4, 5, 6,
2681 7, 8, 9
2682 },
2683 kernelTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002684 kernelTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002685
2686 uint32_t padLeft = 0;
2687 uint32_t padTop = 0;
2688 uint32_t padRight = 0;
2689 uint32_t padBottom = 0;
2690 uint32_t strideX = 1;
2691 uint32_t strideY = 1;
2692 uint32_t dilationX = 3;
2693 uint32_t dilationY = 3;
2694
2695 // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
Sadik Armagan483c8112021-06-01 09:24:52 +01002696 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
2697 auto expectedOutput = QuantizedVector<T>(
2698 {
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002699 5, 5, 5,
2700 5, 5, 5,
2701 5, 5, 5
2702 },
2703 outputTensorInfo.GetQuantizationScale(),
Sadik Armagan483c8112021-06-01 09:24:52 +01002704 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002705
2706 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2707 workloadFactory,
2708 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002709 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002710 input,
2711 kernel,
2712 GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2713 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002714 inputTensorInfo.GetShape(),
2715 kernelTensorInfo.GetShape(),
2716 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002717 qScale,
2718 qOffset,
2719 layout,
2720 padLeft,
2721 padTop,
2722 padRight,
2723 padBottom,
2724 strideX,
2725 strideY,
2726 dilationX,
2727 dilationY);
2728}
2729
2730template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
2731LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
2732 armnn::IWorkloadFactory& workloadFactory,
2733 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002734 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002735 const std::vector<float>& inputNoQuantizedValues,
2736 armnn::TensorInfo& inputTensorInfo,
2737 const std::vector<float>& kernelNoQuantizedValues,
2738 armnn::TensorInfo& kernelTensorInfo,
2739 const std::vector<float>& outputExpectedNoQuantizedValues,
2740 armnn::TensorInfo& outputTensorInfo,
2741 uint32_t dilationX,
2742 uint32_t dilationY,
2743 armnn::DataLayout layout = armnn::DataLayout::NCHW,
2744 bool biasEnabled = false)
2745{
2746 float qScale;
2747 int32_t qOffset;
2748 switch (ArmnnType)
2749 {
Sadik Armagan303980c2020-04-17 12:45:14 +01002750 case armnn::DataType::QAsymmS8:
Derek Lambertif90c56d2020-01-10 17:14:08 +00002751 case armnn::DataType::QAsymmU8:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002752 {
2753 qScale = 0.1f;
2754 qOffset = 128;
2755 break;
2756 }
Derek Lambertif90c56d2020-01-10 17:14:08 +00002757 case armnn::DataType::QSymmS16:
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002758 {
2759 qScale = 0.1f;
2760 qOffset = 0;
2761 break;
2762 }
2763 case armnn::DataType::Float32:
2764 default:
2765 {
2766 qScale = 0.f;
2767 qOffset = 0;
2768 break;
2769 }
2770 }
2771
2772 inputTensorInfo.SetQuantizationScale(qScale);
2773 inputTensorInfo.SetQuantizationOffset(qOffset);
2774 kernelTensorInfo.SetQuantizationScale(qScale);
2775 kernelTensorInfo.SetQuantizationOffset(qOffset);
2776 outputTensorInfo.SetQuantizationScale(qScale);
2777 outputTensorInfo.SetQuantizationOffset(qOffset);
2778
Sadik Armagan483c8112021-06-01 09:24:52 +01002779 auto input = QuantizedVector<T>(inputNoQuantizedValues,
2780 inputTensorInfo.GetQuantizationScale(),
2781 inputTensorInfo.GetQuantizationOffset());
2782 auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
2783 kernelTensorInfo.GetQuantizationScale(),
2784 kernelTensorInfo.GetQuantizationOffset());
2785 auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
2786 outputTensorInfo.GetQuantizationScale(),
2787 outputTensorInfo.GetQuantizationOffset());
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002788
2789 uint32_t padLeft = 0;
2790 uint32_t padTop = 0;
2791 uint32_t padRight = 0;
2792 uint32_t padBottom = 0;
2793 uint32_t strideX = 1;
2794 uint32_t strideY = 1;
2795
2796 return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2797 workloadFactory,
2798 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002799 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002800 input,
2801 kernel,
2802 GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2803 expectedOutput,
Sadik Armagan483c8112021-06-01 09:24:52 +01002804 inputTensorInfo.GetShape(),
2805 kernelTensorInfo.GetShape(),
2806 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002807 qScale,
2808 qOffset,
2809 layout,
2810 padLeft,
2811 padTop,
2812 padRight,
2813 padBottom,
2814 strideX,
2815 strideY,
2816 dilationX,
2817 dilationY);
2818}
2819
2820template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2821LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test(
2822 armnn::IWorkloadFactory& workloadFactory,
2823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002824 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002825 bool biasEnabled,
2826 const armnn::DataLayout layout)
2827{
2828 armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
2829 std::vector<float> inputNoQuantizedValues =
2830 {
2831 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2832 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2833 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2834 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2835 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2836 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2837 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2838 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2839 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2840 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2841 };
2842
Jan Eilers53ef7952021-06-02 12:01:25 +01002843 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002844 std::vector<float> kernelNoQuantizedValues =
2845 {
2846 1, 2, 3,
2847 4, 5, 6,
2848 7, 8, 9
2849 };
2850
2851 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2852 // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2853 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
2854 std::vector<float> outputExpectedNoQuantizedValues =
2855 {
2856 6., 5., 5., 5.,
2857 6., 5., 5., 5.,
2858 6., 5., 5., 5.,
2859 3., 2., 2., 2.
2860 };
2861
2862 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2863 workloadFactory,
2864 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002865 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002866 inputNoQuantizedValues,
2867 inputTensorInfo,
2868 kernelNoQuantizedValues,
2869 kernelTensorInfo,
2870 outputExpectedNoQuantizedValues,
2871 outputTensorInfo,
2872 3,
2873 3,
2874 layout,
2875 biasEnabled);
2876}
2877
2878template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2879LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test(
2880 armnn::IWorkloadFactory& workloadFactory,
2881 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002882 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002883 bool biasEnabled,
2884 const armnn::DataLayout layout)
2885{
2886 armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
2887 std::vector<float> inputNoQuantizedValues =
2888 {
2889 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2890 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2891 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2892 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2893 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2894 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2895 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2896 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2897 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2898 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2899
2900 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2901 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2902 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2903 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2904 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2905 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2906 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2907 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2908 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2909 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2910 };
2911
Jan Eilers53ef7952021-06-02 12:01:25 +01002912 armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 2}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002913 std::vector<float> kernelNoQuantizedValues =
2914 {
2915 1, 2, 3,
2916 4, 5, 6,
2917 7, 8, 9,
2918
2919 1, 2, 3,
2920 4, 5, 6,
2921 7, 8, 9
2922 };
2923
2924 // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2925 // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2926 armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
2927 std::vector<float> outputExpectedNoQuantizedValues =
2928 {
Jan Eilers53ef7952021-06-02 12:01:25 +01002929 2, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 5, 3, 3, 3, 3,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002930
Jan Eilers53ef7952021-06-02 12:01:25 +01002931 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 6, 4, 4, 4
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002932 };
2933
2934 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2935 workloadFactory,
2936 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002937 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002938 inputNoQuantizedValues,
2939 inputTensorInfo,
2940 kernelNoQuantizedValues,
2941 kernelTensorInfo,
2942 outputExpectedNoQuantizedValues,
2943 outputTensorInfo,
2944 3,
2945 3,
2946 layout,
2947 biasEnabled);
2948}
2949
2950template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
2951LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test(
2952 armnn::IWorkloadFactory& workloadFactory,
2953 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01002954 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002955 bool biasEnabled,
2956 const armnn::DataLayout layout)
2957{
2958 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2959 std::vector<float> inputNoQuantizedValues =
2960 {
2961 10.0, 10.0, 10.0,
2962 10.0, 10.0, 10.0,
2963 10.0, 10.0, 10.0,
2964
2965 21.0, 22.0, 23.0,
2966 24.0, 25.0, 26.0,
2967 27.0, 28.0, 29.0
2968 };
2969
Jan Eilers53ef7952021-06-02 12:01:25 +01002970 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 8}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002971
2972 std::vector<float> kernelNoQuantizedValues =
2973 {
2974 0.25f, 0.25f,
2975 0.25f, 0.25f,
2976
2977 0.25f, 0.25f,
2978 0.25f, 0.25f,
2979
2980 0.0f , 0.0f,
2981 0.0f , 0.1f,
2982
2983 0.0f , 0.0f,
2984 0.0f , 0.1f,
2985
2986 0.2f , 0.0f,
2987 0.0f , 0.0f,
2988
2989 0.2f , 0.0f,
2990 0.0f , 0.0f,
2991
2992 0.0f , 0.3f,
2993 0.0f , 0.0f,
2994
2995 0.0f , 0.3f,
2996 0.0f , 0.0f
2997 };
2998
2999 armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType);
3000 std::vector<float> outputExpectedNoQuantizedValues =
3001 {
Jan Eilers53ef7952021-06-02 12:01:25 +01003002 4.5f, 4.5f, 4.5f, 4.5f, 5.5f, 5.5f, 5.5f, 5.5f,
3003 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f,
3004 10.05f, 10.5f, 11.4f, 11.85f, 12.75f, 13.3f, 14.4f, 14.95f,
3005 5.25f, 5.5f, 6.0f, 6.25f, 7.45f, 7.8f, 8.5f, 8.85f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003006 };
3007
3008
3009 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
3010 workloadFactory,
3011 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003012 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003013 inputNoQuantizedValues,
3014 inputTensorInfo,
3015 kernelNoQuantizedValues,
3016 kernelTensorInfo,
3017 outputExpectedNoQuantizedValues,
3018 outputTensorInfo,
3019 1,
3020 1,
3021 layout,
3022 biasEnabled);
3023}
3024
3025template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T>
3026LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test(
3027 armnn::IWorkloadFactory& workloadFactory,
3028 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003029 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003030 bool biasEnabled,
3031 const armnn::DataLayout layout)
3032{
3033 armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
3034 std::vector<float> inputNoQuantizedValues =
3035 {
3036 10.0, 10.0, 10.0,
3037 10.0, 10.0, 10.0,
3038 10.0, 10.0, 10.0,
3039
3040 21.0, 22.0, 23.0,
3041 24.0, 25.0, 26.0,
3042 27.0, 28.0, 29.0
3043 };
3044
Jan Eilers53ef7952021-06-02 12:01:25 +01003045 armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 4}, ArmnnType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003046
3047 std::vector<float> kernelNoQuantizedValues =
3048 {
3049 0.25f, 0.25f,
3050 0.25f, 0.25f,
3051
3052 0.2f , 0.0f,
3053 0.0f , 0.0f,
3054
3055 0.0f , 0.0f,
3056 0.0f , 0.1f,
3057
3058 0.0f , 0.3f,
3059 0.0f , 0.0f
3060
3061 };
3062
3063 armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType);
3064 std::vector<float> outputExpectedNoQuantizedValues =
3065 {
Jan Eilers53ef7952021-06-02 12:01:25 +01003066 4.5f, 4.5f, 4.5f, 4.5f,
3067 5.5f, 5.5f, 5.5f, 5.5f,
3068 5.25f, 5.5f, 6.0f, 6.25f,
3069 7.65f, 8.0f, 8.7f, 9.05f
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003070 };
3071
3072
3073 return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
3074 workloadFactory,
3075 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003076 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003077 inputNoQuantizedValues,
3078 inputTensorInfo,
3079 kernelNoQuantizedValues,
3080 kernelTensorInfo,
3081 outputExpectedNoQuantizedValues,
3082 outputTensorInfo,
3083 1,
3084 1,
3085 layout,
3086 biasEnabled);
3087}
3088
// Runs the same randomly-initialised depthwise convolution through two workload
// factories - the backend under test and a reference backend - and returns both
// outputs packed in a LayerTestResult so the caller can compare them.
// The convolution parameters are fixed: 5x3x8x16 input, 3x3 kernel, channel
// multiplier 1, stride 2x3, padding 1x1, bias enabled.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    const armnnUtils::DataLayoutIndexed& layout)
{
    // Fixed problem size for the comparison run.
    unsigned int inputHeight = 8;
    unsigned int inputWidth = 16;
    unsigned int inputChannels = 3;
    unsigned int inputNum = 5;

    unsigned int kernelHeight = 3;
    unsigned int kernelWidth = 3;
    unsigned int channelMultiplier = 1;

    unsigned int strideX = 2;
    unsigned int strideY = 3;
    unsigned int padX = 1;
    unsigned int padY = 1;

    unsigned int outputNum = inputNum;
    unsigned int outputChannels = inputChannels * channelMultiplier;
    // Standard output-size formula: (in + 2*pad - kernel + stride) / stride.
    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo kernelDesc;
    armnn::TensorInfo biasDesc;

    std::vector<unsigned int> inputShape;
    std::vector<unsigned int> outputShape;
    // Depthwise weights use the [1, H, W, O] layout regardless of the data layout.
    std::vector<unsigned int> kernelShape{ 1, kernelHeight, kernelWidth, outputChannels };
    std::vector<unsigned int> biasShape{ outputChannels };
    switch (layout.GetDataLayout())
    {
        case armnn::DataLayout::NCHW:
            inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
            outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
            break;
        case armnn::DataLayout ::NHWC:
            inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
            outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
            break;
        default:
            throw armnn::InvalidArgumentException("unknown data layout ["
                + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
    }

    // Deliberately different input/output scales so requantisation is exercised
    // for quantized types; zero scale (i.e. unused) for float.
    float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
    float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
    int32_t qOffset = 0;

    inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
    outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
    biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);

    // Fixed seeds make the "random" tensors reproducible across runs/backends.
    auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
    auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
    auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);

    // For the Arm Compute backends (CpuAcc/GpuAcc) running NCHW, the weights are
    // permuted with {0, 2, 3, 1} before being handed to the workload; the
    // reference backend below receives the unpermuted kernel.
    // NOTE(review): assumes this matches the ACL depthwise weight layout - confirm
    // against the backend workload implementations.
    armnn::TensorInfo aclKernelDescriptor = kernelDesc;
    std::vector<T> aclKernelData;
    aclKernelData.assign(kernel.data(), kernel.data() + kernelHeight * kernelWidth * outputChannels);
    if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
        workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
    {
        if (layout == armnn::DataLayout::NCHW)
        {
            std::vector<T> tmp(kernel.size());
            aclKernelDescriptor.SetShape(armnnUtils::Permuted(kernelDesc.GetShape(), {0, 2, 3, 1}));
            armnnUtils::Permute(kernelDesc.GetShape(), {0, 2, 3, 1}, kernel.data(), tmp.data(), sizeof(T));
            aclKernelData = tmp;
        }
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());

    // Handles for the backend under test.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(aclKernelDescriptor);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::DepthwiseConvolution2dQueueDescriptor data;
    armnn::WorkloadInfo info;

    armnn::ScopedTensorHandle weightsTensor(aclKernelDescriptor);
    armnn::ScopedTensorHandle biasTensor(biasDesc);

    // Inputs are wired in the order the workload expects: 0 = input,
    // 1 = weights, 2 = bias.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, aclKernelDescriptor, weightsHandle.get());
    AddInputToWorkload(data, info, biasDesc, biasHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
    // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
    // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
    // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
    AllocateAndCopyDataToITensorHandle(weightsHandle.get(), aclKernelData.data());
    AllocateAndCopyDataToITensorHandle(&weightsTensor, aclKernelData.data());
    AllocateAndCopyDataToITensorHandle(biasHandle.get(), bias.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_BiasEnabled = true;
    data.m_Parameters.m_DataLayout = layout.GetDataLayout();

    // Handles for the reference backend; it gets the unpermuted kernelDesc.
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandleRef = refTensorHandleFactory.CreateTensorHandle(kernelDesc);
    std::unique_ptr<armnn::ITensorHandle> biasHandleRef = refTensorHandleFactory.CreateTensorHandle(biasDesc);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // The reference descriptor starts as a copy of the test one; only the
    // tensor handles are swapped out.
    armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadInput(refData, refInfo, 1, kernelDesc, weightsHandleRef.get());
    SetWorkloadInput(refData, refInfo, 2, biasDesc, biasHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
        = refWorkloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d, refData, refInfo);

    outputHandleRef->Allocate();
    weightsHandleRef->Allocate();
    biasHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());
    CopyDataToITensorHandle(weightsHandleRef.get(), kernel.data());
    CopyDataToITensorHandle(biasHandleRef.get(), bias.data());

    ExecuteWorkload(*workload, memoryManager);

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    // "actual" comes from the backend under test, "expected" from the reference.
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
3251
//
// Explicit template specializations
//

// Convolution2d3x3Dilation3x3Test - one instantiation per supported data type.
template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

// Convolution2d2x3x3Dilation3x3Test - one instantiation per supported data type.
template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory&,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
    const armnn::ITensorHandleFactory&,
    bool,
    armnn::DataLayout);

// Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test - one instantiation per
// supported data type.
template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
    armnn::IWorkloadFactory &workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    const armnn::DataLayout layout);
3374
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003375template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3376DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3377 armnn::IWorkloadFactory&,
3378 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003379 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003380 bool,
3381 armnn::DataLayout);
3382
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003383template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3384DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3385 armnn::IWorkloadFactory&,
3386 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003387 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003388 bool,
3389 armnn::DataLayout);
3390
Sadik Armagan303980c2020-04-17 12:45:14 +01003391template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3392DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3393 armnn::IWorkloadFactory&,
3394 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003395 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003396 bool,
3397 armnn::DataLayout);
3398
Derek Lambertif90c56d2020-01-10 17:14:08 +00003399template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3400DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003401 armnn::IWorkloadFactory&,
3402 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003403 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003404 bool,
3405 armnn::DataLayout);
3406
Derek Lambertif90c56d2020-01-10 17:14:08 +00003407template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3408DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003409 armnn::IWorkloadFactory&,
3410 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003411 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003412 bool,
3413 armnn::DataLayout);
3414
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003415template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3416DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3417 armnn::IWorkloadFactory&,
3418 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003419 const armnn::ITensorHandleFactory&,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003420 bool,
3421 armnn::DataLayout);
3422
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003423template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3424DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3425 armnn::IWorkloadFactory&,
3426 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003427 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003428 bool,
3429 armnn::DataLayout);
3430
Sadik Armagan303980c2020-04-17 12:45:14 +01003431template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
3432DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(
3433 armnn::IWorkloadFactory&,
3434 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003435 const armnn::ITensorHandleFactory&,
Sadik Armagan303980c2020-04-17 12:45:14 +01003436 bool,
3437 armnn::DataLayout);
3438
Derek Lambertif90c56d2020-01-10 17:14:08 +00003439template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
3440DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003441 armnn::IWorkloadFactory&,
3442 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003443 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003444 bool,
3445 armnn::DataLayout);
3446
Derek Lambertif90c56d2020-01-10 17:14:08 +00003447template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
3448DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003449 armnn::IWorkloadFactory&,
3450 const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
Keith Davisf500d6c2020-08-31 08:32:55 +01003451 const armnn::ITensorHandleFactory&,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003452 bool,
3453 armnn::DataLayout);
3454
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003455template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3456DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3457 armnn::IWorkloadFactory &workloadFactory,
3458 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003459 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003460 bool biasEnabled,
3461 const armnn::DataLayout layout);
3462
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003463template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3464DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3465 armnn::IWorkloadFactory &workloadFactory,
3466 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003467 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003468 bool biasEnabled,
3469 const armnn::DataLayout layout);
3470
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003471template LayerTestResult<armnn::ResolveType<armnn::DataType::BFloat16>, 4>
3472DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>(
3473 armnn::IWorkloadFactory &workloadFactory,
3474 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003475 const armnn::ITensorHandleFactory& tensorHandleFactory,
Narumol Prangnawarat44179c32020-03-11 14:51:27 +00003476 bool biasEnabled,
3477 const armnn::DataLayout layout);
3478
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003479template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
3480DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>(
3481 armnn::IWorkloadFactory &workloadFactory,
3482 const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003483 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003484 bool biasEnabled,
3485 const armnn::DataLayout layout);
3486
3487//
3488// Implementation functions
3489//
3490
3491LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
3492 armnn::IWorkloadFactory& workloadFactory,
3493 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003494 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003495 bool biasEnabled,
3496 const armnn::DataLayout layout)
3497{
3498 return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003499 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003500}
3501
3502LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
3503 armnn::IWorkloadFactory& workloadFactory,
3504 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003505 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003506 bool biasEnabled,
3507 const armnn::DataLayout layout)
3508{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003509 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003510 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003511}
3512
3513LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
3514 armnn::IWorkloadFactory& workloadFactory,
3515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003516 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003517 bool biasEnabled,
3518 const armnn::DataLayout layout)
3519{
3520 return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003521 workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003522}
3523
3524LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
3525 armnn::IWorkloadFactory& workloadFactory,
3526 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003527 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003528 bool biasEnabled)
3529{
3530 return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3531 workloadFactory,
3532 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003533 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003534 0.f,
3535 0,
3536 biasEnabled,
3537 armnn::DataLayout::NHWC);
3538}
3539
3540LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
3541 armnn::IWorkloadFactory& workloadFactory,
3542 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003543 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003544 bool biasEnabled,
3545 const armnn::DataLayout layout)
3546{
3547 return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3548 workloadFactory,
3549 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003550 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003551 0.f,
3552 0,
3553 biasEnabled,
3554 layout);
3555}
3556
3557LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
3558 armnn::IWorkloadFactory& workloadFactory,
3559 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003560 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003561 bool biasEnabled,
3562 const armnn::DataLayout layout)
3563{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003564 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003565 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003566}
3567
3568LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test(
3569 armnn::IWorkloadFactory& workloadFactory,
3570 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003571 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003572 bool biasEnabled,
3573 const armnn::DataLayout layout)
3574{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003575 return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003576 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003577}
3578
3579LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test(
3580 armnn::IWorkloadFactory& workloadFactory,
3581 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003582 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003583 bool biasEnabled,
3584 const armnn::DataLayout layout)
3585{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003586 return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003587 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003588}
3589
3590LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
3591 armnn::IWorkloadFactory& workloadFactory,
3592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003593 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003594 armnn::DataLayout layout)
3595{
3596 return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003597 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003598}
3599
3600LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(
3601 armnn::IWorkloadFactory& workloadFactory,
3602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003603 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003604 armnn::DataLayout layout)
3605{
3606 return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
3607 <armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003608 workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003609}
3610
3611LayerTestResult<float, 4> Convolution1dTest(
3612 armnn::IWorkloadFactory& workloadFactory,
3613 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003614 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003615 bool biasEnabled)
3616{
3617 return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003618 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003619}
3620
3621LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
3622 armnn::IWorkloadFactory& workloadFactory,
3623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003624 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003625 bool biasEnabled)
3626{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003627 return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003628 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003629}
3630
// Exercises Convolution2d with per-axis (per-output-channel) quantisation:
// QAsymmU8 input/output, QSymmS8 kernel with one scale per output channel,
// and Signed32 bias quantised with matching per-channel scales.
// Builds and runs the workload directly rather than going through a shared
// *TestCommon helper.
LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout layout)
{
    using namespace armnn;

    const DataType inputType  = DataType::QAsymmU8;
    const DataType kernelType = DataType::QSymmS8;
    const DataType biasType   = DataType::Signed32;

    // Tensors are authored in NHWC; they are permuted below when NCHW is requested.
    TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
    TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);

    // One kernel scale per output channel, quantised along dimension 0.
    const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
    constexpr unsigned int quantDimension = 0;

    TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);

    // Bias scales are input scale (0.5) multiplied by each kernel scale.
    const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
    TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);

    std::vector<uint8_t> inputData =
    {
        138, 108, 138, 108, 138, 108
    };

    std::vector<int8_t> kernelData =
    {
        1, 2, 1, 2, 1, 2
    };

    std::vector<int32_t> biasData =
    {
        4, 4, 4
    };

    std::vector<uint8_t> expectedOutputData =
    {
        121, 118, 115, 121, 118, 115, 121, 118, 115
    };

    // Convert the authored NHWC data/infos in place when running in NCHW.
    if (layout == DataLayout::NCHW)
    {
        PermuteTensorNhwcToNchw(inputInfo, inputData);
        PermuteTensorNhwcToNchw(kernelInfo, kernelData);
        PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
    }

    std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());

    // 1x1 stride, no padding, bias enabled.
    Convolution2dDescriptor descriptor;
    descriptor.m_StrideX = 1;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_BiasEnabled = true;
    descriptor.m_DataLayout = layout;

    std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
    std::unique_ptr<armnn::ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
    std::unique_ptr<armnn::ITensorHandle> biasHandle = nullptr;

    WorkloadInfo workloadInfo;
    // Weights/bias are also provided as scoped handles on the queue descriptor
    // (legacy path) in addition to being workload inputs below.
    ScopedTensorHandle weightTensor(kernelInfo);
    ScopedTensorHandle biasTensor(biasInfo);

    AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());

    Convolution2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Weight = &weightTensor;
    queueDescriptor.m_Bias = &biasTensor;

    // Input 0: activations, input 1: weights, optional input 2: bias.
    AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
    AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());

    if (descriptor.m_BiasEnabled)
    {
        biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
        AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
    }

    AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());

    std::unique_ptr<IWorkload> workload= workloadFactory.CreateWorkload(armnn::LayerType::Convolution2d,
                                                                        queueDescriptor,
                                                                        workloadInfo);
    // Allocate backing memory, then copy the test data into the handles.
    inputHandle->Allocate();
    outputHandle->Allocate();
    weightsHandle->Allocate();

    if (descriptor.m_BiasEnabled)
    {
        biasHandle->Allocate();
        CopyDataToITensorHandle(biasHandle.get(), biasData.data());
    }
    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
    CopyDataToITensorHandle(weightsHandle.get(), kernelData.data());


    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<uint8_t, 4>(actualOutput,
                                       expectedOutputData,
                                       outputHandle->GetShape(),
                                       outputInfo.GetShape());
}
3746
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003747LayerTestResult<float,4> CompareConvolution2dTest(
3748 armnn::IWorkloadFactory& workloadFactory,
3749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003750 armnn::IWorkloadFactory& refWorkloadFactory,
3751 const armnn::ITensorHandleFactory& tensorHandleFactory,
3752 const armnn::ITensorHandleFactory& refTensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003753{
3754 return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003755 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003756}
3757
3758LayerTestResult<float, 4> DepthwiseConvolution2dTest(
3759 armnn::IWorkloadFactory& workloadFactory,
3760 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003761 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003762 bool biasEnabled,
3763 const armnn::DataLayout layout)
3764{
3765 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003766 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003767}
3768
3769LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(
3770 armnn::IWorkloadFactory& workloadFactory,
3771 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003772 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003773 bool biasEnabled)
3774{
3775 return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003776 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003777}
3778
3779LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
3780 armnn::IWorkloadFactory& workloadFactory,
3781 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003782 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003783 bool biasEnabled,
3784 const armnn::DataLayout layout)
3785{
3786 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003787 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003788}
3789
3790LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test(
3791 armnn::IWorkloadFactory& workloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003792 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3793 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003794{
3795 armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
Sadik Armagan483c8112021-06-01 09:24:52 +01003796 std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003797
3798 std::vector<float> kernelData;
3799 std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3800 for (unsigned int i = 0; i < 64; ++i)
3801 {
3802 kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3803 }
3804 armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003805
Jan Eilers53ef7952021-06-02 12:01:25 +01003806 // permute from [O,1,H,W] --> [1,H,W,O]
3807 armnn::PermutationVector permutationVector {3,0,1,2};
3808 kernelTensorInfo = armnnUtils::Permuted(kernelTensorInfo, permutationVector);
3809 std::vector<float> kernelPermuted(kernelTensorInfo.GetNumElements());
3810 armnnUtils::Permute(kernelTensorInfo.GetShape(), permutationVector,
3811 kernelData.data(), kernelPermuted.data(),
3812 GetDataTypeSize(kernelTensorInfo.GetDataType()));
3813
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003814 std::vector<float> expectedOutputData(64, 0.f);
3815 armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003816
3817 return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3818 workloadFactory,
3819 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003820 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003821 input,
Jan Eilers53ef7952021-06-02 12:01:25 +01003822 kernelPermuted,
Sadik Armagan483c8112021-06-01 09:24:52 +01003823 std::vector<float>(),
3824 expectedOutputData,
3825 inputTensorInfo.GetShape(),
3826 kernelTensorInfo.GetShape(),
3827 outputTensorInfo.GetShape(),
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003828 0.f,
3829 0,
3830 armnn::DataLayout::NCHW);
3831}
3832
3833LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(
3834 armnn::IWorkloadFactory& workloadFactory,
3835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003836 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003837 bool biasEnabled,
3838 const armnn::DataLayout layout)
3839{
3840 return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003841 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003842}
3843
3844LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(
3845 armnn::IWorkloadFactory& workloadFactory,
3846 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003847 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003848 bool biasEnabled,
3849 const armnn::DataLayout layout)
3850{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003851 return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003852 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003853}
3854
3855LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(
3856 armnn::IWorkloadFactory& workloadFactory,
3857 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003858 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003859 bool biasEnabled,
3860 const armnn::DataLayout layout)
3861{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003862 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003863 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003864}
3865
3866LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(
3867 armnn::IWorkloadFactory& workloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01003868 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
3869 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003870{
3871 return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3872 workloadFactory,
3873 memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003874 tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003875 0.f,
3876 0,
3877 false);
3878}
3879
3880LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test(
3881 armnn::IWorkloadFactory& workloadFactory,
3882 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003883 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003884 bool biasEnabled,
3885 const armnn::DataLayout layout)
3886{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003887 return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003888 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003889}
3890
3891LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test(
3892 armnn::IWorkloadFactory& workloadFactory,
3893 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003894 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003895 bool biasEnabled,
3896 const armnn::DataLayout layout)
3897{
Derek Lambertif90c56d2020-01-10 17:14:08 +00003898 return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01003899 workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01003900}
3901
Teresa Charlind8df0262019-11-11 12:28:15 +00003902LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest(
3903 armnn::IWorkloadFactory& workloadFactory,
3904 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Keith Davisf500d6c2020-08-31 08:32:55 +01003905 const armnn::ITensorHandleFactory& tensorHandleFactory,
Teresa Charlind8df0262019-11-11 12:28:15 +00003906 const armnn::DataLayout layout)
3907{
3908 using namespace armnn;
3909
Derek Lambertif90c56d2020-01-10 17:14:08 +00003910 const DataType inputType = DataType::QAsymmU8;
Derek Lambertid466a542020-01-22 15:37:29 +00003911 const DataType kernelType = DataType::QSymmS8;
Teresa Charlind8df0262019-11-11 12:28:15 +00003912 const DataType biasType = DataType::Signed32;
3913
3914 TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128); // N H W C
3915 TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128); // N H W C
3916
3917 const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
Jan Eilers53ef7952021-06-02 12:01:25 +01003918 const unsigned int quantDimension = 3;
3919 TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension); // [1, H, W, I*M]
Teresa Charlind8df0262019-11-11 12:28:15 +00003920
3921 const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3922 constexpr unsigned int biasQuantDimension = 0;
3923 TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3924
3925 std::vector<uint8_t> inputData =
3926 {
3927 129, 130,
3928 129, 130,
3929 129, 130,
3930 129, 130,
3931 129, 130,
3932 129, 130,
3933 129, 130,
3934 129, 130,
3935 129, 130
3936 };
3937
3938 std::vector<int8_t> kernelData =
3939 {
3940 1, 1, 1, 1,
3941 1, 1, 1, 1,
3942 1, 1, 1, 1,
3943 1, 1, 1, 1
3944 };
3945
Cathal Corbett4b19d222022-05-11 20:12:17 +01003946 if (workloadFactory.GetBackendId() == armnn::BackendId("GpuAcc") ||
3947 workloadFactory.GetBackendId() == armnn::BackendId("CpuAcc"))
3948 {
3949 if (layout == armnn::DataLayout::NCHW)
3950 {
3951 std::vector<int8_t> tmp(kernelData.size());
3952 kernelInfo.SetShape(armnnUtils::Permuted(kernelInfo.GetShape(), {0, 2, 3, 1}));
3953 armnnUtils::Permute(kernelInfo.GetShape(), {0, 2, 3, 1}, kernelData.data(), tmp.data(), sizeof(int8_t));
3954 kernelData = tmp;
3955 }
3956 }
3957
Teresa Charlind8df0262019-11-11 12:28:15 +00003958 std::vector<int32_t> biasData =
3959 {
3960 4, 4, 4, 4
3961 };
3962
3963 std::vector<uint8_t> expectedOutputData =
3964 {
3965 132, 130, 134, 131,
3966 132, 130, 134, 131,
3967 132, 130, 134, 131,
3968 132, 130, 134, 131
3969 };
3970
3971 if (layout == DataLayout::NCHW)
3972 {
3973 PermuteTensorNhwcToNchw(inputInfo, inputData);
3974 PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3975 }
3976
Sadik Armagan483c8112021-06-01 09:24:52 +01003977 std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3978
Teresa Charlind8df0262019-11-11 12:28:15 +00003979 DepthwiseConvolution2dDescriptor descriptor;
3980 descriptor.m_StrideX = 1;
3981 descriptor.m_StrideY = 1;
3982 descriptor.m_PadLeft = 0;
3983 descriptor.m_PadRight = 0;
3984 descriptor.m_PadTop = 0;
3985 descriptor.m_PadBottom = 0;
3986 descriptor.m_DilationX = 1;
3987 descriptor.m_DilationY = 1;
3988 descriptor.m_BiasEnabled = true;
3989 descriptor.m_DataLayout = layout;
3990
Keith Davisf500d6c2020-08-31 08:32:55 +01003991 std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
Cathal Corbett06902652022-04-14 17:55:11 +01003992 std::unique_ptr<ITensorHandle> weightsHandle = tensorHandleFactory.CreateTensorHandle(kernelInfo);
3993 std::unique_ptr<ITensorHandle> biasHandle = tensorHandleFactory.CreateTensorHandle(biasInfo);
Keith Davisf500d6c2020-08-31 08:32:55 +01003994 std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00003995
Cathal Corbett06902652022-04-14 17:55:11 +01003996 DepthwiseConvolution2dQueueDescriptor queueDescriptor;
Teresa Charlind8df0262019-11-11 12:28:15 +00003997 WorkloadInfo workloadInfo;
James Conroy1f58f032021-04-27 17:13:27 +01003998 ScopedTensorHandle weightTensor(kernelInfo);
3999 ScopedTensorHandle biasTensor(biasInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00004000
Cathal Corbett06902652022-04-14 17:55:11 +01004001 AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
4002 AddInputToWorkload(queueDescriptor, workloadInfo, kernelInfo, weightsHandle.get());
4003 AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
4004 AddInputToWorkload(queueDescriptor, workloadInfo, biasInfo, biasHandle.get());
4005
4006 // AllocateAndCopyDataToITensorHandle() is required twice for the weights AND biases:
4007 // See comment in DepthwiseConvolution2dAsymmetricTestImpl() for reasons.
4008 // 1) ScopedTensorHandle (weightsTensor) required for QueueDescriptor (data.m_Weight).
4009 // 2) ITensorHandle (converts to Backend TensorHandle) required in RefWorkload for GetTensorInfo() method.
4010 AllocateAndCopyDataToITensorHandle(weightsHandle.get(), kernelData.data());
Teresa Charlind8df0262019-11-11 12:28:15 +00004011 AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
Cathal Corbett06902652022-04-14 17:55:11 +01004012 AllocateAndCopyDataToITensorHandle(biasHandle.get(), biasData.data());
Teresa Charlind8df0262019-11-11 12:28:15 +00004013 AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
4014
Teresa Charlind8df0262019-11-11 12:28:15 +00004015 queueDescriptor.m_Parameters = descriptor;
4016 queueDescriptor.m_Weight = &weightTensor;
4017 queueDescriptor.m_Bias = &biasTensor;
4018
Teresa Charlin611c7fb2022-01-07 09:47:29 +00004019 std::unique_ptr<IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::DepthwiseConvolution2d,
4020 queueDescriptor,
4021 workloadInfo);
Teresa Charlind8df0262019-11-11 12:28:15 +00004022 inputHandle->Allocate();
4023 outputHandle->Allocate();
4024
4025 CopyDataToITensorHandle(inputHandle.get(), inputData.data());
4026
4027 ExecuteWorkload(*workload, memoryManager);
4028
4029 LayerTestResult<uint8_t, 4> ret(outputInfo);
4030
Sadik Armagan483c8112021-06-01 09:24:52 +01004031 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
Teresa Charlind8df0262019-11-11 12:28:15 +00004032
Sadik Armagan483c8112021-06-01 09:24:52 +01004033 return LayerTestResult<uint8_t, 4>(actualOutput,
4034 expectedOutputData,
4035 outputHandle->GetShape(),
4036 outputInfo.GetShape());
Teresa Charlind8df0262019-11-11 12:28:15 +00004037}
4038
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01004039LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
4040 armnn::IWorkloadFactory& workloadFactory,
4041 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4042 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01004043 const armnn::ITensorHandleFactory& tensorHandleFactory,
4044 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01004045 const armnn::DataLayout layout)
4046{
4047 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
Keith Davisf500d6c2020-08-31 08:32:55 +01004048 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01004049}
4050
4051LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
4052 armnn::IWorkloadFactory& workloadFactory,
4053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
4054 armnn::IWorkloadFactory& refWorkloadFactory,
Keith Davisf500d6c2020-08-31 08:32:55 +01004055 const armnn::ITensorHandleFactory& tensorHandleFactory,
4056 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01004057 const armnn::DataLayout layout)
4058{
Derek Lambertif90c56d2020-01-10 17:14:08 +00004059 return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
Keith Davisf500d6c2020-08-31 08:32:55 +01004060 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01004061}