//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLayerSupport.hpp"
#include "NeonBackendId.hpp"
#include "NeonBackendModelContext.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/BackendRegistry.hpp>

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/NeonAbsWorkload.hpp"
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonArgMinMaxWorkload.hpp"
#include "workloads/NeonBatchMatMulWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
#include "workloads/NeonCastWorkload.hpp"
#include "workloads/NeonChannelShuffleWorkload.hpp"
#include "workloads/NeonComparisonWorkload.hpp"
#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonConstantWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonConvolution3dWorkload.hpp"
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
#include "workloads/NeonExpWorkload.hpp"
#include "workloads/NeonInstanceNormalizationWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonLogWorkload.hpp"
#include "workloads/NeonLogSoftmaxWorkload.hpp"
#include "workloads/NeonLogicalAndWorkload.hpp"
#include "workloads/NeonLogicalNotWorkload.hpp"
#include "workloads/NeonLogicalOrWorkload.hpp"
#include "workloads/NeonLstmFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonDivisionWorkload.hpp"
#include "workloads/NeonNegWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonGatherWorkload.hpp"
#include "workloads/NeonGatherNdWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonPooling3dWorkload.hpp"
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQLstmWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
#include "workloads/NeonQuantizedLstmWorkload.hpp"
#include "workloads/NeonReduceWorkload.hpp"
#include "workloads/NeonReshapeWorkload.hpp"
#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonRsqrtWorkload.hpp"
#include "workloads/NeonSinWorkload.hpp"
#include "workloads/NeonSliceWorkload.hpp"
#include "workloads/NeonSoftmaxWorkload.hpp"
#include "workloads/NeonSpaceToBatchNdWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
#include "workloads/NeonSqrtWorkload.hpp"
#include "workloads/NeonStackWorkload.hpp"
#include "workloads/NeonStridedSliceWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#include "workloads/NeonTransposeConvolution2dWorkload.hpp"
#include "workloads/NeonTransposeWorkload.hpp"
#include "workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp"
#include "workloads/NeonUnidirectionalSequenceLstmWorkload.hpp"
#endif

namespace armnn
{

namespace
{

const TensorInfo OverrideDataType(const TensorInfo& info, Optional<DataType> type)
{
    if (!type)
    {
        return info;
    }
    return TensorInfo(info.GetShape(),
                      type.value(),
                      info.GetQuantizationScale(),
                      info.GetQuantizationOffset(),
                      info.IsConstant());
}
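// Note: OverrideDataType is used further down by NeonLayerSupport::IsLayerSupported to re-query a layer
// with FP32 TensorInfos when the target CPU lacks FP16 support; only the data type is swapped, the shape
// and quantization parameters are preserved.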

template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
                                DataType dataType,
                                FloatFunc floatFuncPtr,
                                Uint8Func uint8FuncPtr,
                                Params&&... params)
{
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         floatFuncPtr,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<>,
                                         &FalseFunc<>,
                                         std::forward<Params>(params)...);
}

#if defined(ARMCOMPUTENEON_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
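// Each Is<Layer>Supported query below expands this macro, e.g.:
//
//     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
//                                    reasonIfUnsupported,
//                                    input,
//                                    output,
//                                    descriptor);
//
// so a failed arm_compute validation is reported as 'false' together with the ACL error description,
// while a build without NEON support reports a generic reason instead.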
} // anonymous namespace

NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

NeonLayerSupport::NeonLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}

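// Free function that maps a LayerType onto the corresponding NeonLayerSupport query. It is shared by
// NeonLayerSupport::IsLayerSupported below, which may call it a second time with FP32 TensorInfos when
// FP16 is not available on the target CPU.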
bool IsLayerTypeSupported(const LayerType& type,
                          const std::vector<TensorInfo>& infos,
                          const BaseDescriptor& descriptor,
                          const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                          const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                          Optional<std::string&> reasonIfUnsupported,
                          const NeonLayerSupport& support)
{
    switch (type)
    {
        case LayerType::Activation:
            return support.IsActivationSupported(infos[0],
                                                 infos[1],
                                                 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::Addition:
            return support.IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ArgMinMax:
            return support.IsArgMinMaxSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                                reasonIfUnsupported);
        case LayerType::BatchMatMul:
            return support.IsBatchMatMulSupported(infos[0],
                                                  infos[1],
                                                  infos[2],
                                                  *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
                                                  reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return support.IsBatchNormalizationSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                             BatchNormalizationDescriptor*>(&descriptor)),
                                                         reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return support.IsBatchToSpaceNdSupported(infos[0],
                                                     infos[1],
                                                     *(PolymorphicDowncast<const
                                                         BatchToSpaceNdDescriptor*>(&descriptor)),
                                                     reasonIfUnsupported);
        case LayerType::Cast:
            return support.IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return support.IsChannelShuffleSupported(infos[0],
                                                     infos[1],
                                                     *(PolymorphicDowncast<const
                                                         ChannelShuffleDescriptor*>(&descriptor)),
                                                     reasonIfUnsupported);
        case LayerType::Comparison:
            return support.IsComparisonSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::Concat:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return support.IsConcatSupported(inputInfos,
                                             infos[infos.size() - 1],
                                             *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        }
        case LayerType::Constant:
            return support.IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return support.IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return support.IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return support.IsConvolution2dSupported(infos[0],
                                                        infos[1],
                                                        desc,
                                                        infos[2],
                                                        EmptyOptional(),
                                                        reasonIfUnsupported);
            }
            else
            {
                return support.IsConvolution2dSupported(infos[0],
                                                        infos[1],
                                                        desc,
                                                        infos[2],
                                                        infos[3],
                                                        reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return support.IsConvolution3dSupported(infos[0],
                                                        infos[1],
                                                        desc,
                                                        infos[2],
                                                        EmptyOptional(),
                                                        reasonIfUnsupported);
            }
            else
            {
                return support.IsConvolution3dSupported(infos[0],
                                                        infos[1],
                                                        desc,
                                                        infos[2],
                                                        infos[3],
                                                        reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return support.IsDepthToSpaceSupported(infos[0],
                                                   infos[1],
                                                   *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                                   reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return support.IsDepthwiseConvolutionSupported(infos[0],
                                                               infos[1],
                                                               desc,
                                                               infos[2],
                                                               EmptyOptional(),
                                                               reasonIfUnsupported);
            }
            else
            {
                return support.IsDepthwiseConvolutionSupported(infos[0],
                                                               infos[1],
                                                               desc,
                                                               infos[2],
                                                               infos[3],
                                                               reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return support.IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::DetectionPostProcess:
        {
            auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
            return support.IsDetectionPostProcessSupported(infos[0],
                                                           infos[1],
                                                           infos[2],
                                                           infos[3],
                                                           infos[4],
                                                           infos[5],
                                                           infos[6],
                                                           desc,
                                                           reasonIfUnsupported);
        }
        case LayerType::Division:
            return support.IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ElementwiseUnary:
            return support.IsElementwiseUnarySupported(infos[0],
                                                       infos[1],
                                                       *(PolymorphicDowncast<const
                                                           ElementwiseUnaryDescriptor*>(&descriptor)),
                                                       reasonIfUnsupported);
        case LayerType::Fill:
            return support.IsFillSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Floor:
            return support.IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return support.IsFullyConnectedSupported(infos[0],
                                                     infos[1],
                                                     infos[2],
                                                     infos[3],
                                                     *(PolymorphicDowncast<const
                                                         FullyConnectedDescriptor*>(&descriptor)),
                                                     reasonIfUnsupported);
        case LayerType::Gather:
            return support.IsGatherSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::GatherNd:
            return support.IsGatherNdSupported(infos[0],
                                               infos[1],
                                               infos[2],
                                               reasonIfUnsupported);
        case LayerType::Input:
            return support.IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return support.IsInstanceNormalizationSupported(infos[0],
                                                            infos[1],
                                                            *(PolymorphicDowncast<const
                                                                InstanceNormalizationDescriptor*>(&descriptor)),
                                                            reasonIfUnsupported);
        case LayerType::L2Normalization:
            return support.IsL2NormalizationSupported(infos[0],
                                                      infos[1],
                                                      *(PolymorphicDowncast<const
                                                          L2NormalizationDescriptor*>(&descriptor)),
                                                      reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return support.IsLogicalBinarySupported(infos[0],
                                                    infos[1],
                                                    infos[2],
                                                    *(PolymorphicDowncast<const
                                                        LogicalBinaryDescriptor*>(&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return support.IsLogSoftmaxSupported(infos[0],
                                                 infos[1],
                                                 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::Lstm:
            return support.IsLstmSupported(infos[0],
                                           infos[1],
                                           infos[2],
                                           infos[3],
                                           infos[4],
                                           infos[5],
                                           infos[6],
                                           *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                           lstmParamsInfo.value(),
                                           reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::Maximum:
            return support.IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return support.IsMeanSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::MemCopy:
            return support.IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return support.IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return support.IsMergeSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            reasonIfUnsupported);
        case LayerType::Minimum:
            return support.IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Multiplication:
            return support.IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Normalization:
            return support.IsNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const
                                                        NormalizationDescriptor*>(&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::Output:
            return support.IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return support.IsPadSupported(infos[0],
                                          infos[1],
                                          *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        case LayerType::Permute:
            return support.IsPermuteSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::Pooling2d:
            return support.IsPooling2dSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                                reasonIfUnsupported);
        case LayerType::Pooling3d:
            return support.IsPooling3dSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                                reasonIfUnsupported);
        case LayerType::Prelu:
            return support.IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return support.IsQLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            infos[5],
                                            *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                            lstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Quantize:
            return support.IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return support.IsQuantizedLstmSupported(infos[0],
                                                    infos[1],
                                                    infos[2],
                                                    infos[3],
                                                    infos[4],
                                                    quantizedLstmParamsInfo.value(),
                                                    reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reshape:
            return support.IsReshapeSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::Resize:
            return support.IsResizeSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Reduce:
            return support.IsReduceSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Shape:
            return support.IsShapeSupported(infos[0],
                                            infos[1],
                                            reasonIfUnsupported);
        case LayerType::Slice:
            return support.IsSliceSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Softmax:
            return support.IsSoftmaxSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return support.IsSpaceToBatchNdSupported(infos[0],
                                                     infos[1],
                                                     *(PolymorphicDowncast<const
                                                         SpaceToBatchNdDescriptor*>(&descriptor)),
                                                     reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return support.IsSpaceToDepthSupported(infos[0],
                                                   infos[1],
                                                   *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                                   reasonIfUnsupported);
        case LayerType::Splitter:
        {
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return support.IsSplitterSupported(infos[0],
                                               {outputInfos.begin(), outputInfos.end()},
                                               *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return support.IsStackSupported(inputInfos,
                                            infos[infos.size() - 1],
                                            *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return support.IsStridedSliceSupported(infos[0],
                                                   infos[1],
                                                   *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                                   reasonIfUnsupported);
        case LayerType::Subtraction:
            return support.IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Transpose:
            return support.IsTransposeSupported(infos[0],
                                                infos[1],
                                                *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                                reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return support.IsTransposeConvolution2dSupported(infos[0],
                                                                 infos[1],
                                                                 desc,
                                                                 infos[2],
                                                                 EmptyOptional(),
                                                                 reasonIfUnsupported);
            }
            else
            {
                return support.IsTransposeConvolution2dSupported(infos[0],
                                                                 infos[1],
                                                                 desc,
                                                                 infos[2],
                                                                 infos[3],
                                                                 reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
        {
            auto desc = *(PolymorphicDowncast<const UnidirectionalSequenceLstmDescriptor*>(&descriptor));
            return support.IsUnidirectionalSequenceLstmSupported(infos[0],
                                                                 infos[1],
                                                                 infos[2],
                                                                 infos[3],
                                                                 infos[4],
                                                                 infos[5],
                                                                 desc,
                                                                 lstmParamsInfo.value(),
                                                                 reasonIfUnsupported);
        }
        case LayerType::Unmap:
            return true;
        default:
            // layers not supported in neon by default:
            // debug, fakequantization, precompiled,
            // standin, switch
            return false;
    }
}

bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
                                        const std::vector<TensorInfo>& infos,
                                        const BaseDescriptor& descriptor,
                                        const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                        const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    bool isSupported = IsLayerTypeSupported(type,
                                            infos,
                                            descriptor,
                                            lstmParamsInfo,
                                            quantizedLstmParamsInfo,
                                            reasonIfUnsupported,
                                            *this);

    // For android-nn-driver and support library, to run FP16 operations on CpuAcc we need at least v8.2
    // architecture. If the available architecture is older than v8.2, we can check if the operator is
    // supported by changing operator inputs & outputs to be FP32.
    // This does not change the operator datatype in the above parsers to be FP32. We are simply reporting
    // to the parsers whether the operator can be supported in ArmNN. We will then re-enter ArmNN (Network.cpp)
    // where we will recheck IsLayerSupported() on the FP16 datatype, update the operator to be FP32,
    // and insert convert layers around the FP32 operator.
    if (reasonIfUnsupported.has_value())
    {
        std::string checkStr = "This CPU architecture does not support F16 data type, you need v8.2 or above";
        if (!isSupported
            && reasonIfUnsupported.value().find(checkStr) != std::string::npos)
        {
            std::vector<TensorInfo> newInfos;
            for (auto info: infos)
            {
                newInfos.emplace_back(OverrideDataType(info, DataType::Float32));
            }

            std::string tmpString;
            return IsLayerTypeSupported(type,
                                        newInfos,
                                        descriptor,
                                        lstmParamsInfo,
                                        quantizedLstmParamsInfo,
                                        tmpString,
                                        *this);
        }
    }

    return isSupported;
}

bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool NeonLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ArgMinMaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
                                              const TensorInfo& inputY,
                                              const TensorInfo& output,
                                              const BatchMatMulDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchMatMulValidate,
                                   reasonIfUnsupported,
                                   inputX,
                                   inputY,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}

bool NeonLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const BatchToSpaceNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsCastSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ChannelShuffleDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             const ComparisonDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                         const TensorInfo& output,
                                         const OriginsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
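// Note: NeonConcatWorkloadValidate is only consulted when the concatenation axis maps to width, height
// or channels; concatenation along the batch dimension relies on sub-tensors instead, which is why all
// inputs must match the output's type space in that branch.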

bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(reasonIfUnsupported);
    return true;
}

bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution2dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTENEON_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}
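// A sketch of how the fast-math flag read above typically gets set (illustrative only, assuming the
// standard BackendOptions/OptimizerOptions API): the "FastMathEnabled" model option is attached to the
// CpuAcc backend before optimisation, which can enable faster kernels (e.g. Winograd) at some cost in
// precision.
//
//     armnn::BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
//     armnn::OptimizerOptions optimizerOptions;
//     optimizerOptions.m_ModelOptions.push_back(cpuAcc);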

bool NeonLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const Convolution3dDescriptor& descriptor,
                                                const TensorInfo& weights,
                                                const Optional<TensorInfo>& biases,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTENEON_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution3dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const DepthToSpaceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const ElementwiseUnaryDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}
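// Each UnaryOperation above resolves to its dedicated Neon workload validate via the macro (which returns
// from this function); unary operations without a Neon workload fall through to the default case and are
// reported as unsupported.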
1029
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001030bool NeonLayerSupport::IsFillSupported(const TensorInfo& input,
1031 const TensorInfo& output,
1032 const FillDescriptor& descriptor,
1033 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagana792a052020-06-23 16:22:23 +01001034{
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001035 armnn::IgnoreUnused(input);
1036 armnn::IgnoreUnused(output);
1037 armnn::IgnoreUnused(descriptor);
1038
1039 return IsNeonBackendSupported(reasonIfUnsupported);
Sadik Armagana792a052020-06-23 16:22:23 +01001040}
1041
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001042bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
1043 const TensorInfo& output,
1044 Optional<std::string&> reasonIfUnsupported) const
1045{
Jan Eilers8eb25602020-03-09 12:13:48 +00001046 armnn::IgnoreUnused(output);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001047 return IsNeonBackendSupported(reasonIfUnsupported) &&
1048 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
1049 input.GetDataType(),
1050 &FalseFuncF16<>,
1051 &TrueFunc<>,
narpra01db2b1602019-01-23 15:23:11 +00001052 &FalseFuncU8<>,
kevmay012b4d88e2019-01-24 14:05:09 +00001053 &FalseFuncI32<>,
1054 &FalseFuncU8<>);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001055}
1056
1057bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
1058 const TensorInfo& output,
1059 const TensorInfo& weights,
1060 const TensorInfo& biases,
1061 const FullyConnectedDescriptor& descriptor,
1062 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +00001063{
telsoa01c577f2c2018-08-31 09:22:23 +01001064 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
1065 reasonIfUnsupported,
1066 input,
1067 output,
1068 weights,
1069 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +00001070 descriptor,
1071 nullptr);
telsoa014fcda012018-03-09 14:13:49 +00001072}
1073
Teresa Charlinf540eb82020-04-10 19:24:55 +01001074bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
1075 const TensorInfo& input1,
1076 const TensorInfo& output,
Teresa Charlin52664732020-06-29 16:27:03 +01001077 const GatherDescriptor& descriptor,
Teresa Charlinf540eb82020-04-10 19:24:55 +01001078 Optional<std::string&> reasonIfUnsupported) const
1079{
1080 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
1081 reasonIfUnsupported,
1082 input0,
1083 input1,
Teresa Charlin52664732020-06-29 16:27:03 +01001084 output,
1085 descriptor);
Teresa Charlinf540eb82020-04-10 19:24:55 +01001086}
1087
Teresa Charlinbd22c7d2022-04-26 18:14:12 +01001088bool NeonLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
1089 const TensorInfo& input1,
1090 const TensorInfo& output,
1091 Optional<std::string&> reasonIfUnsupported) const
1092{
1093 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherNdWorkloadValidate,
1094 reasonIfUnsupported,
1095 input0,
1096 input1,
1097 output);
1098}
1099
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001100bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
1101 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +00001102{
Derek Lamberti901ea112019-12-10 22:07:09 +00001103 return IsNeonBackendSupported(reasonIfUnsupported, input);
telsoa014fcda012018-03-09 14:13:49 +00001104}
1105
Sadik Armagan0d4863d2019-10-09 14:26:32 +01001106bool NeonLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1107 const TensorInfo& output,
1108 const InstanceNormalizationDescriptor& descriptor,
1109 Optional<std::string&> reasonIfUnsupported) const
1110{
1111 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
1112 reasonIfUnsupported,
1113 input,
1114 output,
1115 descriptor);
1116}
1117
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001118bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1119 const TensorInfo& output,
1120 const L2NormalizationDescriptor& descriptor,
1121 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +00001122{
Matteo Martincighbcd3c852018-09-28 14:14:12 +01001123 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
telsoa014fcda012018-03-09 14:13:49 +00001124}
1125
James Conroy177df1e2020-11-13 10:18:51 +00001126bool NeonLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1127 const TensorInfo& input1,
1128 const TensorInfo& output,
1129 const LogicalBinaryDescriptor& descriptor,
1130 Optional<std::string&> reasonIfUnsupported) const
1131{
1132 switch(descriptor.m_Operation)
1133 {
1134 case LogicalBinaryOperation::LogicalAnd:
1135 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
1136 reasonIfUnsupported,
1137 input0,
1138 input1,
1139 output);
1140 case LogicalBinaryOperation::LogicalOr:
1141 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
1142 reasonIfUnsupported,
1143 input0,
1144 input1,
1145 output);
1146 default:
1147 return false;
1148 }
1149}
1150
Keith Davis69e653f2020-07-02 11:49:26 +01001151bool NeonLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1152 const TensorInfo& output,
1153 const LogSoftmaxDescriptor& descriptor,
1154 Optional<std::string&> reasonIfUnsupported) const
1155{
1156 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1157}
1158
Jan Eilersad5293a2019-07-08 09:57:55 +01001159bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
1160 const TensorInfo& outputStateIn,
1161 const TensorInfo& cellStateIn,
1162 const TensorInfo& scratchBuffer,
1163 const TensorInfo& outputStateOut,
1164 const TensorInfo& cellStateOut,
1165 const TensorInfo& output,
1166 const LstmDescriptor& descriptor,
1167 const LstmInputParamsInfo& paramsInfo,
1168 Optional<std::string&> reasonIfUnsupported) const
1169{
1170 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
1171 reasonIfUnsupported,
1172 input,
1173 outputStateIn,
1174 cellStateIn,
1175 scratchBuffer,
1176 outputStateOut,
1177 cellStateOut,
1178 output,
1179 descriptor,
1180 paramsInfo);
1181}
1182
Nattapat Chaimanowong4e6597a2018-12-20 14:14:06 +00001183bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1184 const TensorInfo& input1,
1185 const TensorInfo& output,
1186 Optional<std::string&> reasonIfUnsupported) const
1187{
1188 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
1189 reasonIfUnsupported,
1190 input0,
1191 input1,
1192 output);
1193}
1194
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001195bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
1196 const TensorInfo& output,
1197 const MeanDescriptor& descriptor,
1198 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +01001199{
Matthew Benthamfd899962018-12-31 15:49:42 +00001200 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
1201 reasonIfUnsupported,
1202 input,
1203 output,
1204 descriptor);
narpra0132b90462018-09-13 11:07:48 +01001205}
1206
Conor Kennedy54b21692019-01-09 07:57:38 +00001207bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1208 const TensorInfo& input1,
1209 const TensorInfo& output,
1210 Optional<std::string&> reasonIfUnsupported) const
1211{
1212 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
1213 reasonIfUnsupported,
1214 input0,
1215 input1,
1216 output);
1217}
1218
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001219bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1220 const TensorInfo& input1,
1221 const TensorInfo& output,
1222 Optional<std::string&> reasonIfUnsupported) const
1223{
1224 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
1225 reasonIfUnsupported,
1226 input0,
1227 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001228 output,
1229 nullptr);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001230}
1231
Pablo Telloe61f0712020-01-23 10:37:17 +00001232bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
1233 const TensorInfo& input1,
1234 const TensorInfo& output,
1235 Optional<std::string&> reasonIfUnsupported) const
1236{
1237 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
1238 reasonIfUnsupported,
1239 input0,
1240 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001241 output,
1242 nullptr);
Pablo Telloe61f0712020-01-23 10:37:17 +00001243}
1244
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001245bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1246 const TensorInfo& output,
1247 const NormalizationDescriptor& descriptor,
1248 Optional<std::string&> reasonIfUnsupported) const
1249{
1250 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
1251 reasonIfUnsupported,
1252 input,
1253 output,
1254 descriptor);
1255}
1256
bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported, output);
}

bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling3dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                        const armnn::TensorInfo &alpha,
                                        const armnn::TensorInfo &output,
                                        armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool NeonLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                        const TensorInfo& previousOutputIn,
                                        const TensorInfo& previousCellStateIn,
                                        const TensorInfo& outputStateOut,
                                        const TensorInfo& cellStateOut,
                                        const TensorInfo& output,
                                        const QLstmDescriptor& descriptor,
                                        const LstmInputParamsInfo& paramsInfo,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    // This check is required here in order to pass the IsLayerSupported data-type tests:
    // only the quantized type combination below is forwarded to the workload validator.
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}

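// Illustrative sketch (assumed shapes and quantization parameters): the type combination the
// QLstm check above requires before it consults NeonQLstmWorkloadValidate; any other mix of
// data types is rejected immediately.
//
//     TensorInfo qLstmActivation({2, 4}, DataType::QAsymmS8, 0.0157f, 0);         // input/output state
//     TensorInfo qLstmCellState({2, 16}, DataType::QSymmS16, 1.0f / 32768.0f, 0); // cell state
//
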
bool NeonLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                                const TensorInfo& cellStateIn,
                                                const TensorInfo& outputStateIn,
                                                const TensorInfo& cellStateOut,
                                                const TensorInfo& outputStateOut,
                                                const QuantizedLstmInputParamsInfo& paramsInfo,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   cellStateIn,
                                   outputStateIn,
                                   cellStateOut,
                                   outputStateOut,
                                   paramsInfo);
}

bool NeonLayerSupport::IsReduceSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ReduceDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

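// Note: the ReshapeDescriptor is intentionally ignored above; the Neon validator only needs the
// input and output TensorInfos, as the output info already carries the target shape.
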
bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SliceDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool NeonLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToBatchNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToDepthDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // When an input with more than two dimensions is split along its last dimension, sub-tensors
    // cannot be used, because the width and height of the sub-tensors would not match those of
    // the parent tensor; validate the Neon splitter workload instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Sub-tensors cannot be used if the types are not in the same space.
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

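// Illustrative sketch (assumed example values): a ViewsDescriptor that splits a 3D tensor into
// two views along its last axis, i.e. the case the #if block above validates with the Neon
// splitter workload rather than with sub-tensors.
//
//     TensorInfo splitInput({1, 4, 6}, DataType::Float32);
//     ViewsDescriptor viewsDesc(2, 3);                      // two views, three dimensions
//     for (uint32_t view = 0; view < 2; ++view)
//     {
//         viewsDesc.SetViewOriginCoord(view, 2, view * 3);  // views start at column 0 and 3
//         viewsDesc.SetViewSize(view, 0, 1);
//         viewsDesc.SetViewSize(view, 1, 4);
//         viewsDesc.SetViewSize(view, 2, 3);
//     }
//     // ComputeSplitAxis(viewsDesc, splitInput.GetShape()) would then contain only axis 2.
//
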
bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                        const TensorInfo& output,
                                        const StackDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const StridedSliceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const TransposeConvolution2dDescriptor& descriptor,
                                                         const TensorInfo& weights,
                                                         const Optional<TensorInfo>& biases,
                                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

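// Note (illustrative sketch, hypothetical names): the biases argument is an Optional<TensorInfo>,
// so a caller checking a bias-free transpose convolution can pass EmptyOptional():
//
//     bool tconvOk = layerSupport.IsTransposeConvolution2dSupported(tconvIn, tconvOut, tconvDesc,
//                                                                   tconvWeights, EmptyOptional(),
//                                                                   reasonRef);
//
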
bool NeonLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const TransposeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

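// UnidirectionalSequenceLstm: the fully quantized combination below (QAsymmS8 activations and
// outputs with a QSymmS16 cell state) is routed to the quantized workload validator; any other
// type combination falls through to the float validator.
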
bool NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                             const TensorInfo& outputStateIn,
                                                             const TensorInfo& cellStateIn,
                                                             const TensorInfo& outputStateOut,
                                                             const TensorInfo& cellStateOut,
                                                             const TensorInfo& output,
                                                             const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                             const LstmInputParamsInfo& paramsInfo,
                                                             Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputStateIn,
                                       cellStateIn,
                                       outputStateOut,
                                       cellStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmFloatWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputStateIn,
                                       cellStateIn,
                                       outputStateOut,
                                       cellStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
}

} // namespace armnn