blob: e84eb799fcff46176a84d213c6a926870a0af7d5 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
6#include "NeonLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "NeonBackendId.hpp"
telsoa014fcda012018-03-09 14:13:49 +00008
telsoa014fcda012018-03-09 14:13:49 +00009#include <armnn/Descriptors.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <InternalTypes.hpp>
11#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012#include <armnn/Tensor.hpp>
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010013#include <armnn/Types.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
David Beck111b5d92018-11-12 14:59:37 +000015#include <backendsCommon/BackendRegistry.hpp>
David Beck3e9e1152018-10-17 14:17:50 +010016
telsoa014fcda012018-03-09 14:13:49 +000017#include <boost/core/ignore_unused.hpp>
18
Matteo Martincighd95e9062019-01-31 15:35:59 +000019#if defined(ARMCOMPUTENEON_ENABLED)
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +010020#include <aclCommon/ArmComputeUtils.hpp>
Matthew Bentham955258d2018-12-10 10:48:52 +000021#include "workloads/NeonAdditionWorkload.hpp"
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010022#include "workloads/NeonActivationWorkload.hpp"
Matthew Benthamc48ac8c2018-12-12 16:15:59 +000023#include "workloads/NeonBatchNormalizationWorkload.hpp"
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +010024#include "workloads/NeonConvolution2dWorkload.hpp"
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010025#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
kevmay01eed85922019-01-28 08:37:25 +000026#include "workloads/NeonGreaterWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010027#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
Nattapat Chaimanowong4e6597a2018-12-20 14:14:06 +000028#include "workloads/NeonMaximumWorkload.hpp"
Matthew Benthamfd899962018-12-31 15:49:42 +000029#include "workloads/NeonMeanWorkload.hpp"
Jim Flynn39d487d2019-05-17 15:44:36 +010030#include "workloads/NeonConcatWorkload.hpp"
Conor Kennedy54b21692019-01-09 07:57:38 +000031#include "workloads/NeonMinimumWorkload.hpp"
Conor Kennedyb99480b2019-03-08 08:24:41 +000032#include "workloads/NeonMultiplicationWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010033#include "workloads/NeonNormalizationFloatWorkload.hpp"
34#include "workloads/NeonFullyConnectedWorkload.hpp"
Éanna Ó Catháin12055742019-01-25 10:01:40 +000035#include "workloads/NeonPadWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010036#include "workloads/NeonPermuteWorkload.hpp"
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +010037#include "workloads/NeonPooling2dWorkload.hpp"
Sadik Armaganc625f002018-12-17 11:32:16 +000038#include "workloads/NeonResizeBilinearWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010039#include "workloads/NeonSoftmaxBaseWorkload.hpp"
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +010040#include "workloads/NeonSplitterWorkload.hpp"
Conor Kennedyb99480b2019-03-08 08:24:41 +000041#include "workloads/NeonSubtractionWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000042#endif
43
44using namespace boost;
45
46namespace armnn
47{
telsoa014fcda012018-03-09 14:13:49 +000048
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010049namespace
arovir017ff76c52018-10-09 09:40:58 +010050{
telsoa014fcda012018-03-09 14:13:49 +000051
// Returns true when armnn was built with Arm Compute Library NEON support
// (ARMCOMPUTENEON_ENABLED defined); otherwise writes an explanatory reason
// into reasonIfUnsupported (if provided) and returns false.
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}
61
// Gates the generic per-data-type support check behind the Neon backend check.
// floatFuncPtr is passed twice so the same predicate covers both float slots
// (presumably Float16 and Float32 — confirm against LayerSupportCommon.hpp);
// the two trailing FalseFunc slots reject the remaining data types.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,   // same predicate for both float slots
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,   // rejected
                                      &FalseFunc<>,   // rejected
                                      std::forward<Params>(params)...);
}
79
#if defined(ARMCOMPUTENEON_ENABLED)
// Invokes an Arm Compute Library validate function and converts its
// arm_compute::Status into a bool, copying the error description into
// reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: this macro expands to a *return statement* inside the calling function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Without NEON, every forwarded check reports "backend not supported" and
// ignores the validate function and its arguments.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif
99
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100100} // anonymous namespace
101
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    // Silences the unused-parameter warning in builds where
    // ARMCOMPUTENEON_ENABLED is not defined and the macro drops its arguments.
    ignore_unused(descriptor);
    // Delegates to the ACL validate function for the Neon activation workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
114
bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon addition workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
126
bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon batch-normalization
    // workload, forwarding all statistics tensors (mean/var/beta/gamma).
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}
146
Jim Flynn906f9462019-05-10 13:55:21 +0100147bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
148 const TensorInfo& output,
Jim Flynne242f2d2019-05-22 14:24:13 +0100149 const ConcatDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100150 Optional<std::string&> reasonIfUnsupported) const
151{
Jim Flynne242f2d2019-05-22 14:24:13 +0100152 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
153 {
154 SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
155 return false;
156 }
157
158 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
159 if(concatInnerAxis < 3) // Width, height, or channels
160 {
161 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
162 reasonIfUnsupported,
163 inputs,
164 output,
165 descriptor);
166 }
167 else if (concatInnerAxis == 3)
168 {
169 for (auto& input : inputs)
170 {
171 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
172 {
173 SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
174 return false;
175 }
176 }
177 return true; // Sub-tensors support concat along batch
178 }
179 else // > 4 dimensions not supported.
180 {
181 SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
182 return false;
183 }
Jim Flynn906f9462019-05-10 13:55:21 +0100184}
185
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100186bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
187 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000188{
189 return IsSupportedForDataTypeNeon(reasonIfUnsupported,
190 output.GetDataType(),
191 &TrueFunc<>,
192 &TrueFunc<>);
193}
194
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100195bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
196 const TensorInfo& output,
197 Optional<std::string&> reasonIfUnsupported) const
198{
199 ignore_unused(input);
200 ignore_unused(output);
201 ignore_unused(reasonIfUnsupported);
202 return true;
203}
204
205bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
206 const TensorInfo& output,
207 Optional<std::string&> reasonIfUnsupported) const
208{
209 ignore_unused(input);
210 ignore_unused(output);
211 ignore_unused(reasonIfUnsupported);
212 return true;
213}
214
bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon 2D convolution
    // workload; biases are optional.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
230
bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon depthwise
    // convolution workload; biases are optional.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
246
bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    // Uses the same validate function as IsDepthwiseConvolutionSupported;
    // dilation parameters travel inside the descriptor.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
262
bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    // Only one data-type slot accepts (TrueFunc); Float16 (FalseFuncF16),
    // uint8 (FalseFuncU8) and int32 (FalseFuncI32) are explicitly rejected,
    // so Floor is effectively Float32-only on this backend.
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}
277
bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon fully-connected
    // workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}
293
kevmay01eed85922019-01-28 08:37:25 +0000294bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
295 const armnn::TensorInfo& input1,
296 const armnn::TensorInfo& output,
297 armnn::Optional<std::string&> reasonIfUnsupported) const
298{
299 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
300 reasonIfUnsupported,
301 input0,
302 input1,
303 output);
304}
305
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100306bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
307 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000308{
309 return IsSupportedForDataTypeNeon(reasonIfUnsupported,
310 input.GetDataType(),
311 &TrueFunc<>,
312 &TrueFunc<>);
313}
314
bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon L2 normalization
    // workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
322
bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon element-wise
    // maximum workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
334
bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon mean workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
346
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000347bool NeonLayerSupport::IsMemCopySupported(const TensorInfo &input,
348 const TensorInfo &output,
349 Optional<std::string &> reasonIfUnsupported) const
350{
351 ignore_unused(input);
352 ignore_unused(output);
353 return true;
354}
355
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const MergerDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    // Merger shares Concat's semantics; forward directly to IsConcatSupported.
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
363
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon element-wise
    // minimum workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
375
bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon multiplication
    // workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
387
bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon normalization
    // workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
399
bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    // Output layers accept every data-type slot except the one guarded by
    // FalseFuncI32 (int32 rejected); uses the generic check directly so all
    // five slots can be specified.
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         output.GetDataType(),
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &FalseFuncI32<>,
                                         &TrueFunc<>);
}
412
bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon pad workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
424
bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon permute workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100432
bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon 2D pooling workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
440
441bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000442 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100443 Optional<std::string&> reasonIfUnsupported) const
444{
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000445 ignore_unused(descriptor);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100446 return IsSupportedForDataTypeNeon(reasonIfUnsupported,
447 input.GetDataType(),
448 &TrueFunc<>,
449 &TrueFunc<>);
450}
451
bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon bilinear-resize
    // workload; the target size is taken from the output tensor info.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
461
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon softmax workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
469
470bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
471 const ViewsDescriptor& descriptor,
472 Optional<std::string&> reasonIfUnsupported) const
473{
474 ignore_unused(descriptor);
475 return IsSupportedForDataTypeNeon(reasonIfUnsupported,
476 input.GetDataType(),
477 &TrueFunc<>,
478 &TrueFunc<>);
479}
480
bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D -
    // so validate the dedicated splitter workload instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // NOTE: the macro expands to a return statement, so this branch
        // exits the function here when it is taken.
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Otherwise the split is implemented with sub-tensors, which requires
    // every output to share the input's type and quantization space.
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
512
bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    // Delegates to the ACL validate function for the Neon subtraction
    // workload.
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
524
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100525} // namespace armnn