//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/NeonAbsWorkload.hpp"
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonLstmFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
#include "workloads/NeonQuantizedLstmWorkload.hpp"
#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonRsqrtWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
#include "workloads/NeonStackWorkload.hpp"
#include "workloads/NeonStridedSliceWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#include "workloads/NeonTransposeConvolution2dWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

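// Reports whether this build of Arm NN includes the Arm Compute NEON backend;
// when it does not, the optional reason string is populated instead.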
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

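// Forwards a support query to the checker matching the tensor data type:
// Float16 and Float32 share floatFuncPtr, the 8-bit quantised type uses
// uint8FuncPtr, and the remaining data types are reported as unsupported.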
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

#if defined(ARMCOMPUTENEON_ENABLED)
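// Calls an Arm Compute Library validate function and converts the returned Status
// into a bool, copying the error description into reasonIfUnsupported on failure.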
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

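// Expands to a return statement, so it must be the final statement of the calling
// Is*Supported function; when NEON support is compiled out it simply reports
// whether the backend is available.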
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported);
#endif

} // anonymous namespace

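// Illustrative usage only (not part of this file): a minimal sketch of how a caller
// might query the backend before creating a workload. The tensor infos, descriptor,
// and variable names below are assumptions for the example.
//
//     NeonLayerSupport layerSupport;
//     std::string reason;
//     if (!layerSupport.IsActivationSupported(inputInfo, outputInfo, activationDescriptor,
//                                             Optional<std::string&>(reason)))
//     {
//         // Fall back to another backend and use 'reason' for diagnostics.
//     }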
bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const ConcatDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
        return false;
    }

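    // Distance of the concat axis from the innermost dimension: for a 4D tensor,
    // 0 = width, 1 = height, 2 = channels, 3 = batch.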
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not in the same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}

bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
                                          const armnn::TensorInfo& input1,
                                          const armnn::TensorInfo& output,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported);
}

bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
                                       const TensorInfo& outputStateIn,
                                       const TensorInfo& cellStateIn,
                                       const TensorInfo& scratchBuffer,
                                       const TensorInfo& outputStateOut,
                                       const TensorInfo& cellStateOut,
                                       const TensorInfo& output,
                                       const LstmDescriptor& descriptor,
                                       const LstmInputParamsInfo& paramsInfo,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const MergerDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPreluSupported(const armnn::TensorInfo& input,
                                        const armnn::TensorInfo& alpha,
                                        const armnn::TensorInfo& output,
                                        armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool NeonLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                                const TensorInfo& cellStateIn,
                                                const TensorInfo& outputStateIn,
                                                const TensorInfo& cellStateOut,
                                                const TensorInfo& outputStateOut,
                                                const QuantizedLstmInputParamsInfo& paramsInfo,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   cellStateIn,
                                   outputStateIn,
                                   cellStateOut,
                                   outputStateOut,
                                   paramsInfo);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
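    // Builds a bilinear ResizeDescriptor from the output shape (assuming NCHW layout)
    // and forwards the query to IsResizeSupported.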
    ResizeDescriptor descriptor;
    descriptor.m_Method = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}

bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToDepthDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // When splitting along the last dimension of an input with more than two
    // dimensions, sub-tensors cannot be used because their width and height would
    // not match those of the parent tensor, so the splitter workload is validated instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not in the same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                        const TensorInfo& output,
                                        const StackDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const StridedSliceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const TransposeConvolution2dDescriptor& descriptor,
                                                         const TensorInfo& weights,
                                                         const Optional<TensorInfo>& biases,
                                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

} // namespace armnn