blob: cf541f491b499c75a549ac77bcc6bfdc8fa182d3 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Teresa Charlin52664732020-06-29 16:27:03 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
6#include "NeonLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "NeonBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "NeonBackendModelContext.hpp"
telsoa014fcda012018-03-09 14:13:49 +00009
Derek Lambertic77874a2020-04-28 13:34:56 +010010#include <armnn/Exceptions.hpp>
telsoa014fcda012018-03-09 14:13:49 +000011#include <armnn/Tensor.hpp>
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010012#include <armnn/Types.hpp>
Matteo Martincighc601aa62019-10-29 15:03:22 +000013#include <armnn/BackendRegistry.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Matteo Martincighc601aa62019-10-29 15:03:22 +000015#include <InternalTypes.hpp>
16#include <LayerSupportCommon.hpp>
Jan Eilers8eb25602020-03-09 12:13:48 +000017#include <armnn/utility/IgnoreUnused.hpp>
Sadik Armagan045f6be2020-09-10 13:37:32 +010018#include <armnn/utility/PolymorphicDowncast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000019
Matteo Martincighd95e9062019-01-31 15:35:59 +000020#if defined(ARMCOMPUTENEON_ENABLED)
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +010021#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000022#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar914e4db2019-09-09 13:36:45 +010023#include "workloads/NeonAbsWorkload.hpp"
Matthew Bentham955258d2018-12-10 10:48:52 +000024#include "workloads/NeonAdditionWorkload.hpp"
Nattapat Chaimanowongd4b70592018-10-12 11:21:49 +010025#include "workloads/NeonActivationWorkload.hpp"
James Conroyd47a0642019-09-17 14:22:06 +010026#include "workloads/NeonArgMinMaxWorkload.hpp"
Matthew Benthamc48ac8c2018-12-12 16:15:59 +000027#include "workloads/NeonBatchNormalizationWorkload.hpp"
Mike Kelly56858022020-01-27 12:14:47 +000028#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
Sadik Armagan48f011e2021-04-21 10:50:34 +010029#include "workloads/NeonCastWorkload.hpp"
Teresa Charline89dd692021-09-01 16:30:34 +010030#include "workloads/NeonChannelShuffleWorkload.hpp"
Teresa Charlincedd34f2020-03-30 11:17:30 +010031#include "workloads/NeonComparisonWorkload.hpp"
Teresa Charline89dd692021-09-01 16:30:34 +010032#include "workloads/NeonConcatWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010033#include "workloads/NeonConstantWorkload.hpp"
Nattapat Chaimanowong974b65f2018-10-15 15:07:34 +010034#include "workloads/NeonConvolution2dWorkload.hpp"
Teresa Charlinec5f7d12021-10-22 17:15:00 +010035#include "workloads/NeonConvolution3dWorkload.hpp"
Aron Virginas-Tar2f00b742019-09-30 13:28:08 +010036#include "workloads/NeonDepthToSpaceWorkload.hpp"
Nattapat Chaimanowong77140882018-10-17 11:12:19 +010037#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
Narumol Prangnawarat01961a72019-05-30 16:47:12 +010038#include "workloads/NeonDequantizeWorkload.hpp"
Teresa Charline89dd692021-09-01 16:30:34 +010039#include "workloads/NeonExpWorkload.hpp"
Sadik Armagan0d4863d2019-10-09 14:26:32 +010040#include "workloads/NeonInstanceNormalizationWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010041#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010042#include "workloads/NeonLogWorkload.hpp"
Keith Davis69e653f2020-07-02 11:49:26 +010043#include "workloads/NeonLogSoftmaxWorkload.hpp"
James Conroy177df1e2020-11-13 10:18:51 +000044#include "workloads/NeonLogicalAndWorkload.hpp"
45#include "workloads/NeonLogicalNotWorkload.hpp"
46#include "workloads/NeonLogicalOrWorkload.hpp"
Jan Eilersad5293a2019-07-08 09:57:55 +010047#include "workloads/NeonLstmFloatWorkload.hpp"
Nattapat Chaimanowong4e6597a2018-12-20 14:14:06 +000048#include "workloads/NeonMaximumWorkload.hpp"
Matthew Benthamfd899962018-12-31 15:49:42 +000049#include "workloads/NeonMeanWorkload.hpp"
Conor Kennedy54b21692019-01-09 07:57:38 +000050#include "workloads/NeonMinimumWorkload.hpp"
Conor Kennedyb99480b2019-03-08 08:24:41 +000051#include "workloads/NeonMultiplicationWorkload.hpp"
Pablo Telloe61f0712020-01-23 10:37:17 +000052#include "workloads/NeonDivisionWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000053#include "workloads/NeonNegWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010054#include "workloads/NeonNormalizationFloatWorkload.hpp"
55#include "workloads/NeonFullyConnectedWorkload.hpp"
Teresa Charlinf540eb82020-04-10 19:24:55 +010056#include "workloads/NeonGatherWorkload.hpp"
Teresa Charlinbd22c7d2022-04-26 18:14:12 +010057#include "workloads/NeonGatherNdWorkload.hpp"
Éanna Ó Catháin12055742019-01-25 10:01:40 +000058#include "workloads/NeonPadWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +010059#include "workloads/NeonPermuteWorkload.hpp"
Nattapat Chaimanowong5d2e7002018-10-12 16:03:56 +010060#include "workloads/NeonPooling2dWorkload.hpp"
Ryan OShea19e79422022-05-04 00:38:03 +010061#include "workloads/NeonPooling3dWorkload.hpp"
Nikhil Raj9b461482019-07-03 15:58:31 +010062#include "workloads/NeonPreluWorkload.hpp"
James Conroycc340932020-05-12 18:08:52 +010063#include "workloads/NeonQLstmWorkload.hpp"
Sadik Armaganfabc2892019-05-31 09:05:11 +010064#include "workloads/NeonQuantizeWorkload.hpp"
Francis Murtagh4fc3c482019-08-02 13:20:54 +010065#include "workloads/NeonQuantizedLstmWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000066#include "workloads/NeonReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000067#include "workloads/NeonReshapeWorkload.hpp"
Ellen Norris-Thompson37e68682019-07-15 14:23:30 +010068#include "workloads/NeonResizeWorkload.hpp"
Aron Virginas-Tar0dd3b432019-09-10 13:55:09 +010069#include "workloads/NeonRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010070#include "workloads/NeonSinWorkload.hpp"
josh minor036f02d2019-11-15 14:53:22 -060071#include "workloads/NeonSliceWorkload.hpp"
Sadik Armaganbe88a572020-04-30 11:39:37 +010072#include "workloads/NeonSoftmaxWorkload.hpp"
Mike Kelly0be3a882020-01-24 11:27:50 +000073#include "workloads/NeonSpaceToBatchNdWorkload.hpp"
Ellen Norris-Thompson29794572019-06-26 16:40:36 +010074#include "workloads/NeonSpaceToDepthWorkload.hpp"
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +010075#include "workloads/NeonSplitterWorkload.hpp"
Teresa Charlin06145cc2022-05-05 15:31:30 +010076#include "workloads/NeonSqrtWorkload.hpp"
Matthew Jackson87f65ea2019-08-01 10:01:34 +010077#include "workloads/NeonStackWorkload.hpp"
FinnWilliamsArm1fa19192019-08-02 17:26:31 +010078#include "workloads/NeonStridedSliceWorkload.hpp"
Conor Kennedyb99480b2019-03-08 08:24:41 +000079#include "workloads/NeonSubtractionWorkload.hpp"
Sadik Armagan581742d2019-08-12 14:11:37 +010080#include "workloads/NeonTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000081#include "workloads/NeonTransposeWorkload.hpp"
Cathal Corbettfd5bec42022-03-03 15:13:23 +000082#include "workloads/NeonUnidirectionalSequenceLstmFloatWorkload.hpp"
Mike Kelly12994962022-04-21 11:57:09 +010083#include "workloads/NeonUnidirectionalSequenceLstmWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000084#endif
85
telsoa014fcda012018-03-09 14:13:49 +000086namespace armnn
87{
telsoa014fcda012018-03-09 14:13:49 +000088
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010089namespace
arovir017ff76c52018-10-09 09:40:58 +010090{
telsoa014fcda012018-03-09 14:13:49 +000091
// Returns true when the library was built with NEON (Arm Compute) support.
// When built without NEON, writes a fixed explanation into
// reasonIfUnsupported (if set) and returns false. Extra arguments are
// accepted so the FORWARD_WORKLOAD_VALIDATE_FUNC fallback macro can forward
// the validate-function arguments; they are ignored.
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
    return true;
#else
    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
    return false;
#endif
}
103
telsoa01c577f2c2018-08-31 09:22:23 +0100104template<typename FloatFunc, typename Uint8Func, typename ... Params>
arovir01085f0a42018-10-08 14:48:19 +0100105bool IsSupportedForDataTypeNeon(Optional<std::string&> reasonIfUnsupported,
telsoa014fcda012018-03-09 14:13:49 +0000106 DataType dataType,
telsoa01c577f2c2018-08-31 09:22:23 +0100107 FloatFunc floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000108 Uint8Func uint8FuncPtr,
109 Params&&... params)
110{
111 return IsNeonBackendSupported(reasonIfUnsupported) &&
112 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
113 dataType,
114 floatFuncPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100115 floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000116 uint8FuncPtr,
narpra01db2b1602019-01-23 15:23:11 +0000117 &FalseFunc<>,
kevmay012b4d88e2019-01-24 14:05:09 +0000118 &FalseFunc<>,
telsoa014fcda012018-03-09 14:13:49 +0000119 std::forward<Params>(params)...);
120}
121
Matteo Martincighd95e9062019-01-31 15:35:59 +0000122#if defined(ARMCOMPUTENEON_ENABLED)
telsoa014fcda012018-03-09 14:13:49 +0000123template<class FuncType, class... Args>
arovir01085f0a42018-10-08 14:48:19 +0100124inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
telsoa014fcda012018-03-09 14:13:49 +0000125{
126 arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
127 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
128 if (!supported && reasonIfUnsupported)
129 {
arovir01085f0a42018-10-08 14:48:19 +0100130 reasonIfUnsupported.value() = aclStatus.error_description();
telsoa014fcda012018-03-09 14:13:49 +0000131 }
132 return supported;
133}
134
// NEON build: delegate the support query to the given ACL validate function.
// Note: the macro expands to a *return* statement in the calling function.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
// Non-NEON build: every query fails with the backend-unavailable reason.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100141} // anonymous namespace
142
// Constructs layer support with access to backend-specific model options
// via the given model context (used e.g. by IsConvolution2dSupported to
// read the fast-math setting).
NeonLayerSupport::NeonLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}
147
// Default constructor: no model context, so option-dependent checks fall
// back to their defaults (e.g. fast-math treated as disabled).
NeonLayerSupport::NeonLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
152
Cathal Corbett34b429c2021-12-24 12:24:40 +0000153bool NeonLayerSupport::IsLayerSupported(const LayerType& type,
154 const std::vector<TensorInfo>& infos,
155 const BaseDescriptor& descriptor,
156 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
157 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
158 Optional<std::string&> reasonIfUnsupported) const
159{
160 switch (type)
161 {
162 case LayerType::Activation:
163 return IsActivationSupported(infos[0],
164 infos[1],
165 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
166 reasonIfUnsupported);
167 case LayerType::Addition:
168 return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
169 case LayerType::ArgMinMax:
170 return IsArgMinMaxSupported(infos[0],
171 infos[1],
172 *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
173 reasonIfUnsupported);
174 case LayerType::BatchNormalization:
175 return IsBatchNormalizationSupported(infos[0],
176 infos[1],
177 infos[2],
178 infos[3],
179 infos[4],
180 infos[5],
181 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
182 (&descriptor)),
183 reasonIfUnsupported);
184 case LayerType::BatchToSpaceNd:
185 return IsBatchToSpaceNdSupported(infos[0],
186 infos[1],
187 *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
188 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000189 case LayerType::Cast:
190 return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
191 case LayerType::ChannelShuffle:
192 return IsChannelShuffleSupported(infos[0],
193 infos[1],
194 *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
195 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000196 case LayerType::Comparison:
197 return IsComparisonSupported(infos[0],
198 infos[1],
199 infos[2],
200 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
201 reasonIfUnsupported);
202 case LayerType::Concat:
203 {
204 std::vector<const TensorInfo*> inputInfos;
205 for (uint32_t i = 0; i < (infos.size() - 1); i++)
206 {
207 inputInfos.push_back(&infos[i]);
208 }
209 return IsConcatSupported(inputInfos,
210 infos[infos.size() - 1],
211 *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
212 reasonIfUnsupported);
213 }
214 case LayerType::Constant:
215 return IsConstantSupported(infos[0], reasonIfUnsupported);
216 case LayerType::ConvertBf16ToFp32:
217 return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
218 case LayerType::ConvertFp16ToFp32:
219 return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
220 case LayerType::ConvertFp32ToBf16:
221 return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
222 case LayerType::ConvertFp32ToFp16:
223 return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
224 case LayerType::Convolution2d:
225 {
226 if (infos.size() != 4)
227 {
228 throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
229 "TensorInfos should be of format: {input, output, weights, biases}.");
230 }
231
232 auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
233 if (infos[3] == TensorInfo())
234 {
235 return IsConvolution2dSupported(infos[0],
236 infos[1],
237 desc,
238 infos[2],
239 EmptyOptional(),
240 reasonIfUnsupported);
241 }
242 else
243 {
244 return IsConvolution2dSupported(infos[0],
245 infos[1],
246 desc,
247 infos[2],
248 infos[3],
249 reasonIfUnsupported);
250 }
251 }
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000252 case LayerType::Convolution3d:
253 {
254 if (infos.size() != 4)
255 {
256 throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
257 "TensorInfos should be of format: {input, output, weights, biases}.");
258 }
259
260 auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
261 if (infos[3] == TensorInfo())
262 {
263 return IsConvolution3dSupported(infos[0],
264 infos[1],
265 desc,
266 infos[2],
267 EmptyOptional(),
268 reasonIfUnsupported);
269 }
270 else
271 {
272 return IsConvolution3dSupported(infos[0],
273 infos[1],
274 desc,
275 infos[2],
276 infos[3],
277 reasonIfUnsupported);
278 }
279 }
Cathal Corbett34b429c2021-12-24 12:24:40 +0000280 case LayerType::DepthToSpace:
281 return IsDepthToSpaceSupported(infos[0],
282 infos[1],
283 *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
284 reasonIfUnsupported);
285 case LayerType::DepthwiseConvolution2d:
286 {
287 if (infos.size() != 4)
288 {
289 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
290 "TensorInfos should be of format: {input, output, weights, biases}.");
291 }
292
293 auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
294 if (infos[3] == TensorInfo())
295 {
296 return IsDepthwiseConvolutionSupported(infos[0],
297 infos[1],
298 desc,
299 infos[2],
300 EmptyOptional(),
301 reasonIfUnsupported);
302 }
303 else
304 {
305 return IsDepthwiseConvolutionSupported(infos[0],
306 infos[1],
307 desc,
308 infos[2],
309 infos[3],
310 reasonIfUnsupported);
311 }
312 }
313 case LayerType::Dequantize:
314 return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000315 case LayerType::DetectionPostProcess:
316 {
317 auto desc = *(PolymorphicDowncast<const DetectionPostProcessDescriptor*>(&descriptor));
318 return LayerSupportBase::IsDetectionPostProcessSupported(infos[0],
319 infos[1],
320 infos[2],
321 infos[3],
322 infos[4],
323 infos[5],
324 infos[6],
325 desc,
326 reasonIfUnsupported);
327 }
Cathal Corbett34b429c2021-12-24 12:24:40 +0000328 case LayerType::Division:
329 return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
330 case LayerType::ElementwiseUnary:
331 return IsElementwiseUnarySupported(infos[0],
332 infos[1],
333 *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
334 reasonIfUnsupported);
335 case LayerType::Fill:
336 return IsFillSupported(infos[0],
337 infos[1],
338 *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
339 reasonIfUnsupported);
340 case LayerType::Floor:
341 return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
342 case LayerType::FullyConnected:
343 return IsFullyConnectedSupported(infos[0],
344 infos[1],
345 infos[2],
346 infos[3],
347 *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
348 reasonIfUnsupported);
349 case LayerType::Gather:
350 return IsGatherSupported(infos[0],
351 infos[1],
352 infos[2],
353 *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
354 reasonIfUnsupported);
Teresa Charlinbd22c7d2022-04-26 18:14:12 +0100355 case LayerType::GatherNd:
356 return IsGatherNdSupported(infos[0],
357 infos[1],
358 infos[2],
359 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000360 case LayerType::Input:
361 return IsInputSupported(infos[0], reasonIfUnsupported);
362 case LayerType::InstanceNormalization:
363 return IsInstanceNormalizationSupported(infos[0],
364 infos[1],
365 *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
366 (&descriptor)),
367 reasonIfUnsupported);
368 case LayerType::L2Normalization:
369 return IsL2NormalizationSupported(infos[0],
370 infos[1],
371 *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
372 reasonIfUnsupported);
373 case LayerType::LogicalBinary:
374 return IsLogicalBinarySupported(infos[0],
375 infos[1],
376 infos[2],
377 *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
378 reasonIfUnsupported);
379 case LayerType::LogSoftmax:
380 return IsLogSoftmaxSupported(infos[0],
381 infos[1],
382 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
383 reasonIfUnsupported);
384 case LayerType::Lstm:
385 return IsLstmSupported(infos[0],
386 infos[1],
387 infos[2],
388 infos[3],
389 infos[4],
390 infos[5],
391 infos[6],
392 *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
393 lstmParamsInfo.value(),
394 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000395 case LayerType::Map:
396 return true;
Cathal Corbett34b429c2021-12-24 12:24:40 +0000397 case LayerType::Maximum:
398 return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
399 case LayerType::Mean:
400 return IsMeanSupported(infos[0],
401 infos[1],
402 *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
403 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000404 case LayerType::MemCopy:
405 return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
406 case LayerType::MemImport:
407 return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
408 case LayerType::Merge:
409 return LayerSupportBase::IsMergeSupported(infos[0],
410 infos[1],
411 infos[2],
412 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000413 case LayerType::Minimum:
414 return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
415 case LayerType::Multiplication:
416 return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
417 case LayerType::Normalization:
418 return IsNormalizationSupported(infos[0],
419 infos[1],
420 *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
421 reasonIfUnsupported);
422 case LayerType::Output:
423 return IsOutputSupported(infos[0], reasonIfUnsupported);
424 case LayerType::Pad:
425 return IsPadSupported(infos[0],
426 infos[1],
427 *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
428 reasonIfUnsupported);
429 case LayerType::Permute:
430 return IsPermuteSupported(infos[0],
431 infos[1],
432 *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
433 reasonIfUnsupported);
434 case LayerType::Pooling2d:
435 return IsPooling2dSupported(infos[0],
436 infos[1],
437 *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
438 reasonIfUnsupported);
Ryan OShea19e79422022-05-04 00:38:03 +0100439 case LayerType::Pooling3d:
440 return IsPooling3dSupported(infos[0],
441 infos[1],
442 *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
443 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000444 case LayerType::Prelu:
445 return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000446 case LayerType::QLstm:
447 return IsQLstmSupported(infos[0],
448 infos[1],
449 infos[2],
450 infos[3],
451 infos[4],
452 infos[5],
453 *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
454 lstmParamsInfo.value(),
455 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000456 case LayerType::Quantize:
457 return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
458 case LayerType::QuantizedLstm:
459 return IsQuantizedLstmSupported(infos[0],
460 infos[1],
461 infos[2],
462 infos[3],
463 infos[4],
464 quantizedLstmParamsInfo.value(),
465 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000466 case LayerType::Rank:
467 return true;
Cathal Corbett34b429c2021-12-24 12:24:40 +0000468 case LayerType::Reshape:
469 return IsReshapeSupported(infos[0],
470 infos[1],
471 *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
472 reasonIfUnsupported);
473 case LayerType::Resize:
474 return IsResizeSupported(infos[0],
475 infos[1],
476 *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
477 reasonIfUnsupported);
478 case LayerType::Reduce:
479 return IsReduceSupported(infos[0],
480 infos[1],
481 *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
482 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000483 case LayerType::Shape:
484 return LayerSupportBase::IsShapeSupported(infos[0],
485 infos[1],
486 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000487 case LayerType::Slice:
488 return IsSliceSupported(infos[0],
489 infos[1],
490 *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
491 reasonIfUnsupported);
492 case LayerType::Softmax:
493 return IsSoftmaxSupported(infos[0],
494 infos[1],
495 *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
496 reasonIfUnsupported);
497 case LayerType::SpaceToBatchNd:
498 return IsSpaceToBatchNdSupported(infos[0],
499 infos[1],
500 *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
501 reasonIfUnsupported);
502 case LayerType::SpaceToDepth:
503 return IsSpaceToDepthSupported(infos[0],
504 infos[1],
505 *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
506 reasonIfUnsupported);
507 case LayerType::Splitter:
508 {
509 std::vector<TensorInfo> outputInfos;
510 for (uint32_t i = 1; i < infos.size(); i++)
511 {
512 outputInfos.push_back(infos[i]);
513 }
514 return IsSplitterSupported(infos[0],
515 {outputInfos.begin(), outputInfos.end()},
516 *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
517 reasonIfUnsupported);
518 }
519 case LayerType::Stack:
520 {
521 std::vector<const TensorInfo*> inputInfos;
522 for (uint32_t i = 0; i < infos.size() - 1; i++)
523 {
524 inputInfos.push_back(&infos[i]);
525 }
526 return IsStackSupported(inputInfos,
527 infos[infos.size() - 1],
528 *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
529 reasonIfUnsupported);
530 }
531 case LayerType::StridedSlice:
532 return IsStridedSliceSupported(infos[0],
533 infos[1],
534 *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
535 reasonIfUnsupported);
536 case LayerType::Subtraction:
537 return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
538 case LayerType::Transpose:
539 return IsTransposeSupported(infos[0],
540 infos[1],
541 *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
542 reasonIfUnsupported);
543 case LayerType::TransposeConvolution2d:
544 {
545 if (infos.size() != 4)
546 {
547 throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
548 "TensorInfos should be of format: {input, output, weights, biases}.");
549 }
550
551 auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
552 if (infos[3] == TensorInfo())
553 {
554 return IsTransposeConvolution2dSupported(infos[0],
555 infos[1],
556 desc,
557 infos[2],
558 EmptyOptional(),
559 reasonIfUnsupported);
560 }
561 else
562 {
563 return IsTransposeConvolution2dSupported(infos[0],
564 infos[1],
565 desc,
566 infos[2],
567 infos[3],
568 reasonIfUnsupported);
569 }
570 }
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000571 case LayerType::UnidirectionalSequenceLstm:
572 return IsUnidirectionalSequenceLstmSupported(infos[0],
573 infos[1],
574 infos[2],
575 infos[3],
576 infos[4],
577 infos[5],
578 *(PolymorphicDowncast<const
579 UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
580 lstmParamsInfo.value(),
581 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000582 case LayerType::Unmap:
583 return true;
Cathal Corbett34b429c2021-12-24 12:24:40 +0000584 default:
585 // layers not supported in neon by default:
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000586 // debug, fakequantization, precompiled,
Ryan OShea19e79422022-05-04 00:38:03 +0100587 // standin, switch
Cathal Corbett34b429c2021-12-24 12:24:40 +0000588 return false;
589 }
590}
591
// Queries ACL's NeonActivationWorkloadValidate for support of the given
// activation. The macro expands to a return of the validation result.
// The IgnoreUnused(descriptor) call is redundant here since descriptor is
// also forwarded to the validate function; kept as-is.
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const ActivationDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
604
// Queries ACL's NeonAdditionWorkloadValidate for elementwise addition support.
// NOTE(review): the trailing nullptr appears to be an optional fused-activation
// descriptor argument of the validate function — confirm against
// NeonAdditionWorkload.hpp.
bool NeonLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAdditionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
617
// Queries ACL's NeonArgMinMaxWorkloadValidate for ArgMin/ArgMax support.
bool NeonLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ArgMinMaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
629
// Queries ACL's NeonBatchNormalizationValidate for batch-norm support with
// the given mean/variance/beta/gamma parameter tensors.
// NOTE(review): the trailing nullptr appears to be an optional fused-activation
// descriptor argument of the validate function — confirm against
// NeonBatchNormalizationWorkload.hpp.
bool NeonLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
                                                     const TensorInfo& var,
                                                     const TensorInfo& beta,
                                                     const TensorInfo& gamma,
                                                     const BatchNormalizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
650
// Queries ACL's NeonBatchToSpaceNdWorkloadValidate for BatchToSpaceNd support.
bool NeonLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const BatchToSpaceNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
662
// Queries ACL's NeonCastValidate for data-type cast support.
bool NeonLayerSupport::IsCastSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
672
// Queries ACL's NeonChannelShuffleValidate for channel-shuffle support.
bool NeonLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ChannelShuffleDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
684
// Queries ACL's NeonComparisonWorkloadValidate for elementwise comparison
// support (the comparison operation is carried in the descriptor).
bool NeonLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             const ComparisonDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
699
Jim Flynn906f9462019-05-10 13:55:21 +0100700bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
701 const TensorInfo& output,
Cathal Corbett34b429c2021-12-24 12:24:40 +0000702 const OriginsDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100703 Optional<std::string&> reasonIfUnsupported) const
704{
Jim Flynne242f2d2019-05-22 14:24:13 +0100705 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
706 {
707 SetValueChecked(reasonIfUnsupported, "Neon Concat: Concat axis > Number of dimensions.");
708 return false;
709 }
710
711 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
712 if(concatInnerAxis < 3) // Width, height, or channels
713 {
714 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConcatWorkloadValidate,
715 reasonIfUnsupported,
716 inputs,
717 output,
718 descriptor);
719 }
720 else if (concatInnerAxis == 3)
721 {
722 for (auto& input : inputs)
723 {
724 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
725 {
726 SetValueChecked(reasonIfUnsupported, "Neon Concat: Types and quantization parameters must match.");
727 return false;
728 }
729 }
730 return true; // Sub-tensors support concat along batch
731 }
732 else // > 4 dimensions not supported.
733 {
734 SetValueChecked(reasonIfUnsupported, "Neon Concat: Maximum of 4 dimensions supported.");
735 return false;
736 }
Jim Flynn906f9462019-05-10 13:55:21 +0100737}
738
// Queries ACL's NeonConstantWorkloadValidate for Constant-layer support
// (only the output tensor info is relevant).
bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
746
Narumol Prangnawarat250d3922020-03-30 16:11:04 +0100747bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
748 const TensorInfo& output,
749 Optional<std::string&> reasonIfUnsupported) const
750{
751 armnn::IgnoreUnused(input);
752 armnn::IgnoreUnused(output);
753 armnn::IgnoreUnused(reasonIfUnsupported);
754 return true;
755}
756
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100757bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
758 const TensorInfo& output,
759 Optional<std::string&> reasonIfUnsupported) const
760{
Jan Eilers8eb25602020-03-09 12:13:48 +0000761 armnn::IgnoreUnused(input);
762 armnn::IgnoreUnused(output);
763 armnn::IgnoreUnused(reasonIfUnsupported);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100764 return true;
765}
766
Narumol Prangnawarat250d3922020-03-30 16:11:04 +0100767bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input,
768 const TensorInfo& output,
769 Optional<std::string&> reasonIfUnsupported) const
770{
771 armnn::IgnoreUnused(input);
772 armnn::IgnoreUnused(output);
773 armnn::IgnoreUnused(reasonIfUnsupported);
774 return true;
775}
776
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100777bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
778 const TensorInfo& output,
779 Optional<std::string&> reasonIfUnsupported) const
780{
Jan Eilers8eb25602020-03-09 12:13:48 +0000781 armnn::IgnoreUnused(input);
782 armnn::IgnoreUnused(output);
783 armnn::IgnoreUnused(reasonIfUnsupported);
Aron Virginas-Tarfc824312018-10-15 15:00:13 +0100784 return true;
785}
786
787bool NeonLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
788 const TensorInfo& output,
789 const Convolution2dDescriptor& descriptor,
790 const TensorInfo& weights,
791 const Optional<TensorInfo>& biases,
792 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000793{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100794 bool isFastMathEnabled = false;
795#if defined(ARMCOMPUTENEON_ENABLED)
796 if (m_ModelContextPtr)
797 {
798 if (m_ModelContextPtr.get() != nullptr)
799 {
Sadik Armagan04a72972020-09-14 15:44:18 +0100800 auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
Sadik Armagan045f6be2020-09-10 13:37:32 +0100801 if (modelOptions)
802 {
803 isFastMathEnabled = modelOptions->IsFastMathEnabled();
804 }
805 }
806 }
807#endif
808
surmeh013537c2c2018-05-18 16:31:43 +0100809 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution2dWorkloadValidate,
810 reasonIfUnsupported,
811 input,
812 output,
813 descriptor,
814 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100815 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000816 isFastMathEnabled,
817 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000818}
819
Teresa Charlinec5f7d12021-10-22 17:15:00 +0100820bool NeonLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
821 const TensorInfo& output,
822 const Convolution3dDescriptor& descriptor,
823 const TensorInfo& weights,
824 const Optional<TensorInfo>& biases,
825 Optional<std::string&> reasonIfUnsupported) const
826{
827 bool isFastMathEnabled = false;
828#if defined(ARMCOMPUTENEON_ENABLED)
829 if (m_ModelContextPtr)
830 {
831 if (m_ModelContextPtr.get() != nullptr)
832 {
833 auto modelOptions = dynamic_cast<NeonBackendModelContext*>(m_ModelContextPtr.get());
834 if (modelOptions)
835 {
836 isFastMathEnabled = modelOptions->IsFastMathEnabled();
837 }
838 }
839 }
840#endif
841
842 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConvolution3dWorkloadValidate,
843 reasonIfUnsupported,
844 input,
845 output,
846 descriptor,
847 weights,
848 biases,
849 isFastMathEnabled,
850 nullptr);
851}
852
// Checks DepthToSpace support by forwarding to the ACL workload validation.
bool NeonLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const DepthToSpaceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
864
// Checks depthwise-convolution support by forwarding to the ACL workload
// validation. The trailing nullptr is the optional activation descriptor
// (none for a standalone layer).
bool NeonLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const DepthwiseConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
881
// Checks Dequantize support by forwarding to the ACL workload validation.
bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
891
// Dilated depthwise convolution reuses the same ACL validation as the
// non-dilated case; dilation parameters travel inside the descriptor.
// The trailing nullptr is the optional activation descriptor.
bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                              const TensorInfo& output,
                                                              const DepthwiseConvolution2dDescriptor& descriptor,
                                                              const TensorInfo& weights,
                                                              const Optional<TensorInfo>& biases,
                                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
908
josh minor4a3c6102020-01-06 16:40:46 -0600909bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
910 const TensorInfo& output,
911 const ElementwiseUnaryDescriptor& descriptor,
912 Optional<std::string&> reasonIfUnsupported) const
913{
Derek Lambertic77874a2020-04-28 13:34:56 +0100914 switch(descriptor.m_Operation)
josh minor4a3c6102020-01-06 16:40:46 -0600915 {
Derek Lambertic77874a2020-04-28 13:34:56 +0100916 case UnaryOperation::Abs:
917 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
918 reasonIfUnsupported,
919 input,
920 output);
921 case UnaryOperation::Exp:
922 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonExpWorkloadValidate,
923 reasonIfUnsupported,
924 input,
925 output);
Teresa Charlin50de4fa2021-05-31 18:47:33 +0100926 case UnaryOperation::LogicalNot:
927 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
928 reasonIfUnsupported,
929 input,
930 output);
931 case UnaryOperation::Log:
932 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogWorkloadValidate,
933 reasonIfUnsupported,
934 input,
935 output);
Derek Lambertic77874a2020-04-28 13:34:56 +0100936 case UnaryOperation::Neg:
937 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
938 reasonIfUnsupported,
939 input,
940 output);
941 case UnaryOperation::Rsqrt:
942 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate,
943 reasonIfUnsupported,
944 input,
945 output);
ryan.oshea3cff135b2021-10-07 15:28:14 +0000946 case UnaryOperation::Sin:
947 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSinWorkloadValidate,
948 reasonIfUnsupported,
949 input,
950 output);
Teresa Charlin06145cc2022-05-05 15:31:30 +0100951 case UnaryOperation::Sqrt:
952 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSqrtWorkloadValidate,
953 reasonIfUnsupported,
954 input,
955 output);
Derek Lambertic77874a2020-04-28 13:34:56 +0100956 default:
957 return false;
josh minor4a3c6102020-01-06 16:40:46 -0600958 }
josh minor4a3c6102020-01-06 16:40:46 -0600959}
960
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100961bool NeonLayerSupport::IsFillSupported(const TensorInfo& input,
962 const TensorInfo& output,
963 const FillDescriptor& descriptor,
964 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagana792a052020-06-23 16:22:23 +0100965{
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100966 armnn::IgnoreUnused(input);
967 armnn::IgnoreUnused(output);
968 armnn::IgnoreUnused(descriptor);
969
970 return IsNeonBackendSupported(reasonIfUnsupported);
Sadik Armagana792a052020-06-23 16:22:23 +0100971}
972
// Floor is supported only for Float32 input on this backend: the generic
// data-type dispatch below returns true for F32 and false for F16, U8,
// I32 and the final (quantized) slot. The output info is not inspected.
bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(output);
    return IsNeonBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}
987
// Checks FullyConnected support by forwarding to the ACL workload
// validation. The trailing nullptr is the optional activation descriptor.
bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const TensorInfo& weights,
                                                 const TensorInfo& biases,
                                                 const FullyConnectedDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}
1004
// Checks Gather support by forwarding to the ACL workload validation.
// input0 is the params tensor, input1 the indices tensor.
bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         const GatherDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
1018
// Checks GatherNd support by forwarding to the ACL workload validation.
// input0 is the params tensor, input1 the indices tensor.
bool NeonLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1030
// Input layers are supported whenever the Neon backend itself is available.
bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported, input);
}
1036
// Checks InstanceNormalization support via the ACL workload validation.
bool NeonLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const InstanceNormalizationDescriptor& descriptor,
                                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1048
// Checks L2Normalization support via the ACL workload validation.
bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const L2NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1056
James Conroy177df1e2020-11-13 10:18:51 +00001057bool NeonLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1058 const TensorInfo& input1,
1059 const TensorInfo& output,
1060 const LogicalBinaryDescriptor& descriptor,
1061 Optional<std::string&> reasonIfUnsupported) const
1062{
1063 switch(descriptor.m_Operation)
1064 {
1065 case LogicalBinaryOperation::LogicalAnd:
1066 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
1067 reasonIfUnsupported,
1068 input0,
1069 input1,
1070 output);
1071 case LogicalBinaryOperation::LogicalOr:
1072 FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
1073 reasonIfUnsupported,
1074 input0,
1075 input1,
1076 output);
1077 default:
1078 return false;
1079 }
1080}
1081
// Checks LogSoftmax support via the ACL workload validation.
bool NeonLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const LogSoftmaxDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1089
// Checks LSTM support by forwarding all state/output tensors and the layer
// parameters to the float LSTM workload validation.
bool NeonLayerSupport::IsLstmSupported(const TensorInfo& input,
                                       const TensorInfo& outputStateIn,
                                       const TensorInfo& cellStateIn,
                                       const TensorInfo& scratchBuffer,
                                       const TensorInfo& outputStateOut,
                                       const TensorInfo& cellStateOut,
                                       const TensorInfo& output,
                                       const LstmDescriptor& descriptor,
                                       const LstmInputParamsInfo& paramsInfo,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1113
// Checks elementwise Maximum support via the ACL workload validation.
bool NeonLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1125
// Checks Mean (reduction) support via the ACL workload validation.
bool NeonLayerSupport::IsMeanSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const MeanDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMeanWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1137
// Checks elementwise Minimum support via the ACL workload validation.
bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1149
// Checks elementwise Multiplication support via the ACL workload
// validation. The trailing nullptr is the optional activation descriptor.
bool NeonLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                                 const TensorInfo& input1,
                                                 const TensorInfo& output,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1162
// Checks elementwise Division support via the ACL workload validation.
// The trailing nullptr is the optional activation descriptor.
bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1175
// Checks (local response) Normalization support via the ACL validation.
bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1187
// Output layers are supported whenever the Neon backend itself is available.
bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsNeonBackendSupported(reasonIfUnsupported, output);
}
1193
// Checks Pad support via the ACL workload validation.
bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const PadDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1205
// Checks Permute support via the ACL workload validation.
bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const PermuteDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001213
// Checks 2D pooling support via the ACL workload validation.
bool NeonLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling2dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1221
// Checks 3D pooling support via the ACL workload validation.
bool NeonLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const Pooling3dDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1229
// Checks PReLU support via the ACL workload validation; alpha is the
// learned slope tensor applied to negative inputs.
bool NeonLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                        const armnn::TensorInfo &alpha,
                                        const armnn::TensorInfo &output,
                                        armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}
1237
// Checks QLSTM support. Only the fully quantized QAsymmS8 activation /
// QSymmS16 cell-state combination is accepted; anything else returns false.
bool NeonLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                        const TensorInfo& previousOutputIn,
                                        const TensorInfo& previousCellStateIn,
                                        const TensorInfo& outputStateOut,
                                        const TensorInfo& cellStateOut,
                                        const TensorInfo& output,
                                        const QLstmDescriptor& descriptor,
                                        const LstmInputParamsInfo& paramsInfo,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    // Check required here in order to pass IsLayerSupported for datatypes tests
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        // NOTE(review): the cell-state tensor is passed before the output-state
        // tensor here, the reverse of this function's parameter order — verify
        // this matches the NeonQLstmWorkloadValidate signature.
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}
1272
// Checks Quantize support via the ACL workload validation.
bool NeonLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
1282
// Checks QuantizedLstm support via the ACL workload validation.
bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                                const TensorInfo& cellStateIn,
                                                const TensorInfo& outputStateIn,
                                                const TensorInfo& cellStateOut,
                                                const TensorInfo& outputStateOut,
                                                const QuantizedLstmInputParamsInfo& paramsInfo,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   cellStateIn,
                                   outputStateIn,
                                   cellStateOut,
                                   outputStateOut,
                                   paramsInfo);
}
1300
// Checks Reduce support via the ACL workload validation.
bool NeonLayerSupport::IsReduceSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ReduceDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1312
// Checks Reshape support via the ACL workload validation. The descriptor
// only carries the target shape, which is already reflected in `output`,
// so it is not needed for validation.
bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
1324
// Checks Resize support via the ACL workload validation.
bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1336
// Checks Slice support via the ACL workload validation.
bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SliceDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1348
// Checks Softmax support via the ACL workload validation.
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1356
// Checks SpaceToBatchNd support via the ACL workload validation.
bool NeonLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToBatchNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1368
// Checks SpaceToDepth support via the ACL workload validation.
bool NeonLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToDepthDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1380
// Checks Splitter support. With ACL available, a split along the single
// innermost axis is validated by the ACL split workload; otherwise the
// split falls back to sub-tensors, which requires every output to share
// the input's type/quantization space.
bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                           const ViewsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTENEON_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Without ACL the descriptor is not inspected on the fallback path.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
1413
// Checks Stack support via the ACL workload validation.
bool NeonLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                        const TensorInfo& output,
                                        const StackDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}
1425
// Checks StridedSlice support via the ACL workload validation.
bool NeonLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const StridedSliceDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1437
// Checks elementwise Subtraction support via the ACL workload validation.
// The trailing nullptr is the optional activation descriptor.
bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1450
// Checks TransposeConvolution2d (deconvolution) support via the ACL
// workload validation.
bool NeonLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const TransposeConvolution2dDescriptor& descriptor,
                                                         const TensorInfo& weights,
                                                         const Optional<TensorInfo>& biases,
                                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
1466
// Checks Transpose support via the ACL workload validation.
bool NeonLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const TransposeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1474
// Checks UnidirectionalSequenceLstm support. A fully quantized network
// (QAsymmS8 activations with QSymmS16 cell state) is validated against the
// quantized workload; every other datatype combination is validated against
// the float workload.
bool NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                             const TensorInfo& outputStateIn,
                                                             const TensorInfo& cellStateIn,
                                                             const TensorInfo& outputStateOut,
                                                             const TensorInfo& cellStateOut,
                                                             const TensorInfo& output,
                                                             const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                             const LstmInputParamsInfo& paramsInfo,
                                                             Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        outputStateIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputStateIn,
                                       cellStateIn,
                                       outputStateOut,
                                       cellStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonUnidirectionalSequenceLstmFloatWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputStateIn,
                                       cellStateIn,
                                       outputStateOut,
                                       cellStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
}
1517
Aron Virginas-Tarfc824312018-10-15 15:00:13 +01001518} // namespace armnn