//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

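// Helper for layer checks that depend only on the data type: the same functor is passed
// for both the FP16 and FP32 slots of IsSupportedForDataTypeGeneric, the uint8 functor
// covers the quantized case, and the remaining data types are rejected via FalseFunc.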
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

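// Adapts an Arm Compute Library validate function to the boolean interface used here:
// the validate call returns an arm_compute::Status, which is mapped to a bool, and the
// ACL error description is copied into reasonIfUnsupported on failure.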
#if defined(ARMCOMPUTENEON_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

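// When NEON support is compiled in, this macro expands to a return statement that
// forwards its arguments to the given ACL validate function via IsWorkloadSupported.
// Without NEON support it simply reports that the backend is unavailable.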
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
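// Example (sketch) of how a caller might query one of these checks before creating a
// workload; the TensorInfo and descriptor objects are assumed to be set up elsewhere:
//
//     NeonLayerSupport layerSupport;
//     std::string reason;
//     bool supported = layerSupport.IsActivationSupported(inputInfo,
//                                                         outputInfo,
//                                                         activationDescriptor,
//                                                         Optional<std::string&>(reason));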

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

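// Concat is validated against the ACL workload when the concatenation axis maps to
// width, height, or channels. Concatenation along the batch axis is implemented with
// sub-tensors instead, which requires every input to match the output's type and
// quantization parameters. Tensors with more than four dimensions are not supported.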
bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const ConcatDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Sub-tensors cannot be used if the types do not match
            {
                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // More than 4 dimensions are not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

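// Dilated depthwise convolutions are validated by the same ACL depthwise workload as
// the non-dilated case; the dilation parameters are assumed to be carried in the
// DepthwiseConvolution2dDescriptor.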
bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

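// Floor is only supported for Float32: the FP16, quantized, and integer paths below all
// resolve to FalseFunc variants.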
bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &FalseFuncF16<>,
                                      &TrueFunc<>,
                                      &FalseFuncU8<>,
                                      &FalseFuncI32<>,
                                      &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                          const armnn::TensorInfo& input1,
                                          const armnn::TensorInfo& output,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

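// Merger is the legacy name for Concat in this codebase; the check forwards to
// IsConcatSupported.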
bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const MergerDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &FalseFuncI32<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPreluSupported(const armnn::TensorInfo& input,
                                        const armnn::TensorInfo& alpha,
                                        const armnn::TensorInfo& output,
                                        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool NeonLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

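// Only the Bilinear resize method is validated against the ACL workload; any other
// resize method is reported as unsupported.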
bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.m_Method == ResizeMethod::Bilinear)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       output);
    }

    return false;
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

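// For the overload that knows the output shapes: splitting along the last dimension of
// an input with more than two dimensions is validated against the Neon splitter
// workload, because sub-tensors cannot be used there. Every other case falls back to
// sub-tensors, which requires the outputs to match the input's type and quantization
// parameters.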
bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // When splitting along the last dimension of an input with more than two dimensions,
    // sub-tensors cannot be used because their width and height would not match those of
    // the parent tensor, so validate against the Neon splitter workload instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Sub-tensors cannot be used if the types do not match
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

} // namespace armnn