//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#ifdef ARMCOMPUTENEON_ENABLED
#include "workloads/NeonAdditionFloatWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationFloatWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMultiplicationFloatWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSubtractionFloatWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

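// Reports whether ArmNN was built with the Arm Compute Library NEON backend.
// When it was not, the optional reason string (if provided) is filled in.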
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if ARMCOMPUTENEON_ENABLED
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
    }
    return false;
#endif
}

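// Dispatches a support check by tensor data type: the same floatFuncPtr is used for both
// Float16 and Float32, and uint8FuncPtr handles the 8-bit quantised case, once the NEON
// backend itself is confirmed to be available.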
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      std::forward<Params>(params)...);
}

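// Converts the arm_compute::Status returned by an ACL validate function into a bool,
// copying the error description into the optional reason string on failure.
// FORWARD_WORKLOAD_VALIDATE_FUNC forwards a support query to such a validate function
// when NEON is compiled in, and otherwise just reports whether the backend is available.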
#if ARMCOMPUTENEON_ENABLED
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

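// Example usage (sketch): querying the NEON backend for activation support from client
// code. The tensor shape, data type and descriptor below are illustrative placeholders.
//
//     armnn::NeonLayerSupport layerSupport;
//     armnn::TensorInfo info(armnn::TensorShape({1, 16}), armnn::DataType::Float32);
//     armnn::ActivationDescriptor desc;
//     std::string reason;
//     bool supported = layerSupport.IsActivationSupported(info, info, desc,
//                                                         armnn::Optional<std::string&>(reason));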
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

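// The Float16 <-> Float32 conversion layers are always reported as supported.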
bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

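// Division is not implemented by the NEON backend, so the query always reports unsupported.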
bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input0);
    ignore_unused(input1);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return false;
}

bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return false;
}

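// Floor is supported for Float32 only; Float16 and 8-bit quantised inputs are rejected.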
bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

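// LSTM is not implemented by the NEON backend; all arguments are ignored and the query
// always reports unsupported.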
bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
                                       const TensorInfo& outputStateIn,
                                       const TensorInfo& cellStateIn,
                                       const TensorInfo& scratchBuffer,
                                       const TensorInfo& outputStateOut,
                                       const TensorInfo& cellStateOut,
                                       const TensorInfo& output,
                                       const LstmDescriptor& descriptor,
                                       const TensorInfo& inputToForgetWeights,
                                       const TensorInfo& inputToCellWeights,
                                       const TensorInfo& inputToOutputWeights,
                                       const TensorInfo& recurrentToForgetWeights,
                                       const TensorInfo& recurrentToCellWeights,
                                       const TensorInfo& recurrentToOutputWeights,
                                       const TensorInfo& forgetGateBias,
                                       const TensorInfo& cellBias,
                                       const TensorInfo& outputGateBias,
                                       const TensorInfo* inputToInputWeights,
                                       const TensorInfo* recurrentToInputWeights,
                                       const TensorInfo* cellToInputWeights,
                                       const TensorInfo* inputGateBias,
                                       const TensorInfo* projectionWeights,
                                       const TensorInfo* projectionBias,
                                       const TensorInfo* cellToForgetWeights,
                                       const TensorInfo* cellToOutputWeights,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(outputStateIn);
    ignore_unused(cellStateIn);
    ignore_unused(scratchBuffer);
    ignore_unused(outputStateOut);
    ignore_unused(cellStateOut);
    ignore_unused(output);
    ignore_unused(descriptor);
    ignore_unused(inputToForgetWeights);
    ignore_unused(inputToCellWeights);
    ignore_unused(inputToOutputWeights);
    ignore_unused(recurrentToForgetWeights);
    ignore_unused(recurrentToCellWeights);
    ignore_unused(recurrentToOutputWeights);
    ignore_unused(forgetGateBias);
    ignore_unused(cellBias);
    ignore_unused(outputGateBias);
    ignore_unused(inputToInputWeights);
    ignore_unused(recurrentToInputWeights);
    ignore_unused(cellToInputWeights);
    ignore_unused(inputGateBias);
    ignore_unused(projectionWeights);
    ignore_unused(projectionBias);
    ignore_unused(cellToForgetWeights);
    ignore_unused(cellToOutputWeights);
    ignore_unused(reasonIfUnsupported);
    return false;
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return false;
}

bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      inputs[0]->GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return false;
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(reasonIfUnsupported);
    return false;
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

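// Heuristic used to decide whether arm_compute's NEDirectConvolutionLayer should be
// preferred over the default convolution path for a given weight shape and descriptor.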
bool IsNeonDirectConvolutionPreferred(const TensorInfo& weightInfo, const Convolution2dDescriptor& desc)
{
    // See the arm_compute::NEDirectConvolutionLayer documentation for the supported cases,
    // and cross-check with the NEDirectConvolutionLayerKernel::configure() implementation.

    // Only 1x1 kernels use direct convolution. Performance results and details are in:
    // https://jira.arm.com/browse/IVGCVSW-1003
    // Measurements were taken as of clframework: f105ab972135bcd21304883eff040d7e587099bc

    const bool dataTypeSupported = (weightInfo.GetDataType() == armnn::DataType::Float32);

    // Strides: 1|2|3
    const bool strideSupported = (desc.m_StrideX == 1 || desc.m_StrideX == 2 || desc.m_StrideX == 3) &&
                                 (desc.m_StrideY == 1 || desc.m_StrideY == 2 || desc.m_StrideY == 3);

    auto paddingLargerThan = [](const Convolution2dDescriptor& conv2ddesc, unsigned int value)
    {
        return conv2ddesc.m_PadLeft > value || conv2ddesc.m_PadRight > value ||
               conv2ddesc.m_PadTop > value || conv2ddesc.m_PadBottom > value;
    };

    // Supported sizes and padding.
    const bool sizeAndPaddingSupported =
        // Pad > 0 is not supported for 1x1 weights.
        (weightInfo.GetShape()[2] == 1 && weightInfo.GetShape()[3] == 1 && !paddingLargerThan(desc, 0u));

    const bool preferDirectConvolution = dataTypeSupported &&
                                         strideSupported &&
                                         sizeAndPaddingSupported &&
                                         // NEDirectConvolutionLayerKernel doesn't support a NULL bias.
                                         desc.m_BiasEnabled;
    return preferDirectConvolution;
}

} // namespace armnn