//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

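// The Arm Compute NEON workload validation headers below are only available when the
// NEON backend has been compiled into the library.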
#if defined(ARMCOMPUTENEON_ENABLED)
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonMergerWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

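// Reports whether this build of Arm NN includes the Arm Compute NEON backend,
// recording the reason in reasonIfUnsupported when it does not.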
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

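// Checks backend availability, then dispatches on data type: the same float function is
// used for both Float16 and Float32, while Signed32 and Boolean tensors are always rejected.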
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

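// FORWARD_WORKLOAD_VALIDATE_FUNC forwards its arguments to a workload's static Validate
// function and converts the returned arm_compute::Status into a bool, capturing the error
// description when validation fails. When NEON support is not compiled in, it simply
// reports the backend as unavailable.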
#if defined(ARMCOMPUTENEON_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

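// Conversions between FP16 and FP32 are reported as supported unconditionally, in both directions.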
bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

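// Floor is only supported for Float32 tensors on the NEON backend.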
bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                          const armnn::TensorInfo& input1,
                                          const armnn::TensorInfo& output,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

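// Merger (concatenation) along the width, height, or channel axis is validated by the Arm
// Compute library; concatenation along the batch axis is implemented with sub-tensors and
// therefore requires every input to match the output's type and quantization space.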
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
        return false;
    }

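    // The descriptor counts the concat axis from the outermost dimension; convert it to an
    // offset from the innermost dimension to distinguish width/height/channel concatenation
    // from concatenation along the batch axis.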
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         output.GetDataType(),
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &FalseFuncI32<>,
                                         &TrueFunc<>);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

} // namespace armnn