//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

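// Reports whether this build of Arm NN includes Arm Compute NEON support.
// When it does not, the reason string is populated so callers can surface it.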
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

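// Combines the backend availability check with the generic per-data-type dispatch:
// Float16 and Float32 share floatFuncPtr, the quantised 8-bit type uses uint8FuncPtr,
// and the signed 32-bit and boolean types are rejected.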
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

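// When NEON support is compiled in, IsWorkloadSupported runs an Arm Compute Library
// Validate() function and converts the resulting arm_compute::Status into a bool,
// copying the error description into reasonIfUnsupported on failure.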
#if defined(ARMCOMPUTENEON_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

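// FORWARD_WORKLOAD_VALIDATE_FUNC expands a layer-support query into a call to the
// matching workload Validate() function; in the non-NEON build below it instead
// fails every query with the reason reported by IsNeonBackendSupported.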
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

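// Concat support is currently routed through the (deprecated) Merger query below,
// which implements the actual axis and sub-tensor checks.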
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    return IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported);
    ARMNN_NO_DEPRECATE_WARN_END
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &FalseFuncF16<>,
                                      &TrueFunc<>,
                                      &FalseFuncU8<>,
                                      &FalseFuncI32<>,
                                      &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                          const armnn::TensorInfo& input1,
                                          const armnn::TensorInfo& output,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

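// Merger (concat) is validated against the ACL concat workload when the concatenation
// axis is width, height, or channels. Concatenation along the batch axis falls back to
// sub-tensors, which requires every input to match the output's type and quantization
// space. More than four dimensions is not supported.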
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &FalseFuncI32<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

} // namespace armnn