//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTENEON_ENABLED)
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonMergerWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

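// Reports whether the NEON backend is available at all, i.e. whether Arm NN was built
// with Arm Compute Library NEON support (ARMCOMPUTENEON_ENABLED).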
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

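// Applies the given per-data-type checks when the backend is available; the float check
// covers both Float16 and Float32, and the remaining data types are rejected.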
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

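// When the Arm Compute Library is available, support queries are forwarded to the
// corresponding workload's validate function and its error description is surfaced on
// failure. Without it, every forwarded query simply reports that the backend is unavailable.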
#if defined(ARMCOMPUTENEON_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

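// Per-layer support checks. Most forward to the matching Neon workload's validate
// function via FORWARD_WORKLOAD_VALIDATE_FUNC; the rest use simple data-type checks.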
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
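    // Floor is only supported for Float32 inputs here; other data types are rejected.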
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &FalseFuncF16<>,
                                      &TrueFunc<>,
                                      &FalseFuncU8<>,
                                      &FalseFuncI32<>,
                                      &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                          const armnn::TensorInfo& input1,
                                          const armnn::TensorInfo& output,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
        return false;
    }

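    // Express the concat axis as a distance from the innermost dimension: 0..2 covers
    // width/height/channels, while 3 corresponds to concatenation along the batch dimension.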
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
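    // Outputs of any supported data type are accepted except Signed32.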
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &FalseFuncI32<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

} // namespace armnn