blob: b63837539e290666d4ec89ad0b5a52be039a39bd [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Mike Kelly3ec30772023-03-08 13:47:17 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
Teresa Charlin94916a52022-10-19 08:48:07 +010025#include "workloads/ClBatchMatMulWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010026#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000027#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010028#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010029#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000030#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010031#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010032#include "workloads/ClConvertFp16ToFp32Workload.hpp"
33#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010034#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010035#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010036#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010037#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010038#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010039#include "workloads/ClDivisionWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010040#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010041#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000042#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010043#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010044#include "workloads/ClGatherWorkload.hpp"
Teresa Charlin989e2f62022-04-27 16:26:11 +010045#include "workloads/ClGatherNdWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010046#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010047#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010048#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010049#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000050#include "workloads/ClLogicalAndWorkload.hpp"
51#include "workloads/ClLogicalNotWorkload.hpp"
52#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010053#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000054#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010055#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010056#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000057#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010058#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000059#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010060#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010061#include "workloads/ClPadWorkload.hpp"
62#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010063#include "workloads/ClPooling2dWorkload.hpp"
Ryan OSheabab8fa92022-03-09 10:29:02 +000064#include "workloads/ClPooling3dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010065#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010066#include "workloads/ClQLstmWorkload.hpp"
67#include "workloads/ClQuantizedLstmWorkload.hpp"
68#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000069#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000070#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010071#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010072#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010073#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000074#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010075#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000076#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010077#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010078#include "workloads/ClSplitterWorkload.hpp"
Teresa Charlinaac61122022-05-05 16:11:36 +010079#include "workloads/ClSqrtWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010080#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000081#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010082#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010083#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000084#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000085#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000086#endif
87
telsoa014fcda012018-03-09 14:13:49 +000088
89namespace armnn
90{
arovir017c22c702018-10-09 11:16:46 +010091
telsoa014fcda012018-03-09 14:13:49 +000092namespace
93{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010094
telsoa014fcda012018-03-09 14:13:49 +000095template<unsigned int FilterSize>
96bool IsMatchingSize2d(const TensorInfo& weightInfo)
97{
telsoa01c577f2c2018-08-31 09:22:23 +010098 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000099 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
100}
101
// Base case: matches when actualStride equals the single ValidStride argument.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// Recursive case: matches when actualStride equals any stride in the pack.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +0000113
// Returns true when the library was built with Arm Compute CL support
// (ARMCOMPUTECL_ENABLED); otherwise writes an explanation into
// reasonIfUnsupported (if provided) and returns false.
// The trailing argument pack is ignored; it exists so the FORWARD_* macros
// below can forward an arbitrary argument list in the CL-disabled build.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
128
// When CL is enabled, FORWARD_CL_LAYER_SUPPORT_FUNC evaluates the real
// support expression; otherwise it degrades to a "backend not built" check.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// Invokes an Arm Compute Library validate function and converts its
// arm_compute::Status result into a bool, copying the ACL error description
// into reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: this macro expands to a *return* statement, so a bare
// FORWARD_WORKLOAD_VALIDATE_FUNC(...) ends the enclosing function/case.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
154
// Data-type-based support check: true only when the CL backend is available
// AND the generic per-data-type dispatch accepts 'dataType'.
// floatFuncPtr is passed twice — presumably it covers both float-precision
// slots of IsSupportedForDataTypeGeneric (TODO confirm against its signature);
// the two FalseFunc entries reject the remaining data-type slots outright.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         floatFuncPtr,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<>,
                                         &FalseFunc<>,
                                         std::forward<Params>(params)...);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100172} // anonymous namespace
173
// Constructs a ClLayerSupport bound to a backend-specific model context,
// which can carry CL tuning/configuration options.
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

// Default-constructs a ClLayerSupport with no model context.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
183
// Unified support-query entry point for the CL backend: dispatches on the
// layer type and forwards the packed TensorInfos and downcast descriptor to
// the corresponding Is<Layer>Supported check.
// 'infos' layout is per-layer — typically inputs first, then outputs, then
// weights/biases where applicable (see the explicit size checks below).
// lstmParamsInfo / quantizedLstmParamsInfo are only read by the LSTM-family
// cases and must be populated for those layer types.
bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchMatMul:
            return IsBatchMatMulSupported(infos[0],
                                          infos[1],
                                          infos[2],
                                          *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            // All entries except the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ElementwiseBinary:
        {
            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));

            // Each FORWARD_WORKLOAD_VALIDATE_FUNC below expands to a return
            // statement, so there is no fall-through between these cases.
            // The trailing nullptr is the (absent) fused ActivationDescriptor.
            switch (desc.m_Operation)
            {
                case BinaryOperation::Add:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Div:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Minimum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Maximum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Mul:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Sub:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                default:
                    return false;
            }
        }
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            // Requires lstmParamsInfo to be set by the caller.
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Multiplication:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            // Requires lstmParamsInfo to be set by the caller.
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            // Requires quantizedLstmParamsInfo to be set by the caller.
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // infos[0] is the input; all remaining entries are outputs.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            // All entries except the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            // Deprecated standalone layer; kept for backward compatibility.
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in slot 3 means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            // Requires lstmParamsInfo to be set by the caller.
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                             UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // Layers not supported in CL by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch.
            // (Pooling3d IS handled above, despite older comments.)
            return false;
    }
}
673
// Queries ACL's activation validator; the macro expands to the return statement.
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
685
// Queries ACL's addition validator; the trailing nullptr is the (absent)
// fused ActivationDescriptor.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
698
// Queries ACL's ArgMinMax validator; the macro expands to the return statement.
bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
711
// Queries ACL's batched-matrix-multiply validator for the X/Y operand pair.
bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
                                            const TensorInfo& inputY,
                                            const TensorInfo& output,
                                            const BatchMatMulDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
                                   reasonIfUnsupported,
                                   inputX,
                                   inputY,
                                   output,
                                   descriptor);
}
725
// Queries ACL's batch-normalization validator; the trailing nullptr is the
// (absent) fused ActivationDescriptor.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
746
// Queries ACL's BatchToSpaceNd validator; the macro expands to the return statement.
bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
758
Sadik Armaganf40d6d42021-04-22 09:12:11 +0100759bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
760 const TensorInfo& output,
761 Optional<std::string&> reasonIfUnsupported) const
762{
763 FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
764 reasonIfUnsupported,
765 input,
766 output);
767}
768
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100769bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
Mike Kelly831faed2018-11-28 11:52:08 +0000770 const TensorInfo& output,
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100771 const ChannelShuffleDescriptor& descriptor,
Mike Kelly831faed2018-11-28 11:52:08 +0000772 Optional<std::string&> reasonIfUnsupported) const
773{
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100774 FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
Mike Kelly831faed2018-11-28 11:52:08 +0000775 reasonIfUnsupported,
776 input,
777 output,
778 descriptor);
779}
780
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100781bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
782 const TensorInfo& input1,
783 const TensorInfo& output,
784 const ComparisonDescriptor& descriptor,
785 Optional<std::string&> reasonIfUnsupported) const
786{
Teresa Charlin2b030d92020-03-27 16:40:56 +0000787 FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
788 reasonIfUnsupported,
789 input0,
790 input1,
791 output,
792 descriptor);
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100793}
794
Jim Flynn906f9462019-05-10 13:55:21 +0100795bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
796 const TensorInfo& output,
Cathal Corbett34b429c2021-12-24 12:24:40 +0000797 const OriginsDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100798 Optional<std::string&> reasonIfUnsupported) const
799{
Jim Flynne242f2d2019-05-22 14:24:13 +0100800 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
801 {
802 SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
803 return false;
804 }
805
806 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
807 if(concatInnerAxis < 3) // Width, height, or channels
808 {
809 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
810 reasonIfUnsupported,
811 inputs,
812 output,
813 descriptor);
814 }
815 else if (concatInnerAxis == 3)
816 {
817 // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
818 // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
819 for (auto& input : inputs)
820 {
821 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
822 {
823 SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
824 return false;
825 }
826 }
827 return true; // Sub-tensors support concat along batch
828 }
829 else // > 4 dimensions not supported.
830 {
831 SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
832 return false;
833 }
Jim Flynn906f9462019-05-10 13:55:21 +0100834}
835
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100836bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
837 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000838{
Mike Kelly0886ac42020-04-27 09:55:40 +0100839 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
840 reasonIfUnsupported,
841 output);
telsoa014fcda012018-03-09 14:13:49 +0000842}
843
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100844bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
845 const TensorInfo& output,
846 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000847{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100848 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
849 reasonIfUnsupported,
850 input,
851 output);
telsoa014fcda012018-03-09 14:13:49 +0000852}
853
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100854bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
855 const TensorInfo& output,
856 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000857{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100858 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
859 reasonIfUnsupported,
860 input,
861 output);
telsoa014fcda012018-03-09 14:13:49 +0000862}
863
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100864bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
865 const TensorInfo& output,
866 const Convolution2dDescriptor& descriptor,
867 const TensorInfo& weights,
868 const Optional<TensorInfo>& biases,
869 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000870{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100871 bool isFastMathEnabled = false;
872#if defined(ARMCOMPUTECL_ENABLED)
873 if (m_ModelContextPtr)
874 {
875 if (m_ModelContextPtr.get() != nullptr)
876 {
877 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
878 if (modelOptions)
879 {
880 isFastMathEnabled = modelOptions->IsFastMathEnabled();
881 }
882 }
883 }
884#endif
885
surmeh013537c2c2018-05-18 16:31:43 +0100886 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
887 reasonIfUnsupported,
888 input,
889 output,
890 descriptor,
891 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100892 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000893 isFastMathEnabled,
894 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000895}
896
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100897bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
898 const TensorInfo& output,
899 const Convolution3dDescriptor& descriptor,
900 const TensorInfo& weights,
901 const Optional<TensorInfo>& biases,
902 Optional<std::string&> reasonIfUnsupported) const
903{
904 bool isFastMathEnabled = false;
905#if defined(ARMCOMPUTECL_ENABLED)
906 if (m_ModelContextPtr)
907{
908 if (m_ModelContextPtr.get() != nullptr)
909 {
910 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
911 if (modelOptions)
912 {
913 isFastMathEnabled = modelOptions->IsFastMathEnabled();
914 }
915 }
916}
917#endif
918
919 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
920 reasonIfUnsupported,
921 input,
922 output,
923 descriptor,
924 weights,
925 biases,
926 isFastMathEnabled,
927 nullptr);
928}
929
Jim Flynn983daec2019-05-29 16:20:16 +0100930bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
931 const TensorInfo& output,
932 Optional<std::string&> reasonIfUnsupported) const
933{
934 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
935 reasonIfUnsupported,
936 input,
937 output);
938}
939
Aron Virginas-Tarb2801962019-09-30 11:24:53 +0100940bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
941 const TensorInfo& output,
942 const DepthToSpaceDescriptor& descriptor,
943 Optional<std::string&> reasonIfUnsupported) const
944{
945 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
946 reasonIfUnsupported,
947 input,
948 output,
949 descriptor);
950}
951
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100952bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
953 const TensorInfo& output,
954 const DepthwiseConvolution2dDescriptor& descriptor,
955 const TensorInfo& weights,
956 const Optional<TensorInfo>& biases,
957 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000958{
telsoa01c577f2c2018-08-31 09:22:23 +0100959 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
960 reasonIfUnsupported,
961 input,
962 output,
963 descriptor,
964 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000965 biases,
966 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000967}
968
Pablo Tellof0bd6832019-04-26 17:58:13 +0100969bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
970 const TensorInfo& output,
971 const DepthwiseConvolution2dDescriptor& descriptor,
972 const TensorInfo& weights,
973 const Optional<TensorInfo>& biases,
974 Optional<std::string&> reasonIfUnsupported) const
975{
976 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
977 reasonIfUnsupported,
978 input,
979 output,
980 descriptor,
981 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000982 biases,
983 nullptr);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100984}
985
986
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100987bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
988 const TensorInfo& input1,
989 const TensorInfo& output,
990 Optional<std::string&> reasonIfUnsupported) const
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100991{
992 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
993 reasonIfUnsupported,
994 input0,
995 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +0000996 output,
997 nullptr);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100998}
999
josh minor4a3c6102020-01-06 16:40:46 -06001000bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
1001 const TensorInfo& output,
1002 const ElementwiseUnaryDescriptor& descriptor,
1003 Optional<std::string&> reasonIfUnsupported) const
1004{
Sadik Armagan9fabf432020-05-27 13:40:58 +01001005 switch(descriptor.m_Operation)
josh minor4a3c6102020-01-06 16:40:46 -06001006 {
Sadik Armagan9fabf432020-05-27 13:40:58 +01001007 case UnaryOperation::Abs:
1008 FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
1009 reasonIfUnsupported,
1010 input,
1011 output);
1012 case UnaryOperation::Exp:
1013 FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
1014 reasonIfUnsupported,
1015 input,
1016 output);
Teresa Charlin50de4fa2021-05-31 18:47:33 +01001017 case UnaryOperation::Log:
1018 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
1019 reasonIfUnsupported,
1020 input,
1021 output);
1022 case UnaryOperation::LogicalNot:
1023 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
1024 reasonIfUnsupported,
1025 input,
1026 output);
Sadik Armagan9fabf432020-05-27 13:40:58 +01001027 case UnaryOperation::Neg:
1028 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
1029 reasonIfUnsupported,
1030 input,
1031 output);
1032 case UnaryOperation::Rsqrt:
1033 FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
1034 reasonIfUnsupported,
1035 input,
1036 output);
Teresa Charlin50de4fa2021-05-31 18:47:33 +01001037 case UnaryOperation::Sin:
1038 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
James Conroyfe3ec942020-11-18 14:20:53 +00001039 reasonIfUnsupported,
1040 input,
1041 output);
Teresa Charlinaac61122022-05-05 16:11:36 +01001042 case UnaryOperation::Sqrt:
1043 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
1044 reasonIfUnsupported,
1045 input,
1046 output);
Sadik Armagan9fabf432020-05-27 13:40:58 +01001047 default:
1048 return false;
josh minor4a3c6102020-01-06 16:40:46 -06001049 }
josh minor4a3c6102020-01-06 16:40:46 -06001050}
1051
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001052bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
1053 const TensorInfo& output,
1054 const FillDescriptor& descriptor,
1055 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +01001056{
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001057 armnn::IgnoreUnused(input);
1058 armnn::IgnoreUnused(output);
1059 armnn::IgnoreUnused(descriptor);
1060
1061 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +01001062}
1063
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001064bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
1065 const TensorInfo& output,
1066 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +00001067{
Sadik Armagan9be49162019-10-30 16:15:26 +00001068 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
1069 reasonIfUnsupported,
1070 input,
1071 output);
telsoa01c577f2c2018-08-31 09:22:23 +01001072}
1073
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001074bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
1075 const TensorInfo& output,
1076 const TensorInfo& weights,
1077 const TensorInfo& biases,
1078 const FullyConnectedDescriptor& descriptor,
1079 Optional<std::string&> reasonIfUnsupported) const
1080{
1081 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
1082 reasonIfUnsupported,
1083 input,
1084 output,
1085 weights,
1086 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +00001087 descriptor,
1088 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001089}
1090
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001091bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
1092 const TensorInfo& input1,
1093 const TensorInfo& output,
Teresa Charlin52664732020-06-29 16:27:03 +01001094 const GatherDescriptor& descriptor,
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001095 Optional<std::string&> reasonIfUnsupported) const
1096{
1097 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
1098 reasonIfUnsupported,
1099 input0,
1100 input1,
Teresa Charlin52664732020-06-29 16:27:03 +01001101 output,
1102 descriptor);
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001103}
1104
Teresa Charlin989e2f62022-04-27 16:26:11 +01001105bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
1106 const TensorInfo& input1,
1107 const TensorInfo& output,
1108 Optional<std::string&> reasonIfUnsupported) const
1109{
1110 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
1111 reasonIfUnsupported,
1112 input0,
1113 input1,
1114 output);
1115}
1116
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001117bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
1118 Optional<std::string&> reasonIfUnsupported) const
1119{
Derek Lamberti901ea112019-12-10 22:07:09 +00001120 return IsClBackendSupported(reasonIfUnsupported, input);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001121}
1122
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01001123bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1124 const TensorInfo& output,
1125 const InstanceNormalizationDescriptor& descriptor,
1126 Optional<std::string&> reasonIfUnsupported) const
1127{
1128 FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
1129 reasonIfUnsupported,
1130 input,
1131 output,
1132 descriptor);
1133}
1134
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001135bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1136 const TensorInfo& output,
1137 const L2NormalizationDescriptor& descriptor,
1138 Optional<std::string&> reasonIfUnsupported) const
1139{
1140 FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
1141 reasonIfUnsupported,
1142 input,
1143 output,
1144 descriptor);
1145}
1146
James Conroyfe3ec942020-11-18 14:20:53 +00001147bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1148 const TensorInfo& input1,
1149 const TensorInfo& output,
1150 const LogicalBinaryDescriptor& descriptor,
1151 Optional<std::string&> reasonIfUnsupported) const
1152{
1153 IgnoreUnused(output);
1154
1155 switch(descriptor.m_Operation)
1156 {
1157 case LogicalBinaryOperation::LogicalAnd:
1158 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
1159 reasonIfUnsupported,
1160 input0,
1161 input1,
1162 output);
1163 case LogicalBinaryOperation::LogicalOr:
1164 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
1165 reasonIfUnsupported,
1166 input0,
1167 input1,
1168 output);
1169 default:
1170 return false;
1171 }
1172}
1173
1174
Teresa Charlin8398edc2020-07-20 14:23:02 +01001175bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1176 const TensorInfo& output,
1177 const LogSoftmaxDescriptor& descriptor,
1178 Optional<std::string&> reasonIfUnsupported) const
1179{
1180 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
1181 reasonIfUnsupported,
1182 input,
1183 output,
1184 descriptor);
1185}
1186
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001187bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
1188 const TensorInfo& outputStateIn,
1189 const TensorInfo& cellStateIn,
1190 const TensorInfo& scratchBuffer,
1191 const TensorInfo& outputStateOut,
1192 const TensorInfo& cellStateOut,
1193 const TensorInfo& output,
1194 const LstmDescriptor& descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001195 const LstmInputParamsInfo& paramsInfo,
1196 Optional<std::string&> reasonIfUnsupported) const
telsoa01c577f2c2018-08-31 09:22:23 +01001197{
arovir01085f0a42018-10-08 14:48:19 +01001198 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
1199 reasonIfUnsupported,
1200 input,
1201 outputStateIn,
1202 cellStateIn,
1203 scratchBuffer,
1204 outputStateOut,
1205 cellStateOut,
1206 output,
1207 descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001208 paramsInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001209}
1210
keidav01a959ee52018-12-19 10:04:58 +00001211bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1212 const TensorInfo& input1,
1213 const TensorInfo& output,
1214 Optional<std::string&> reasonIfUnsupported) const
1215{
1216 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
1217 reasonIfUnsupported,
1218 input0,
1219 input1,
1220 output);
1221}
1222
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001223bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
1224 const TensorInfo& output,
1225 const MeanDescriptor& descriptor,
1226 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +01001227{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01001228 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
1229 reasonIfUnsupported,
1230 input,
1231 output,
1232 descriptor);
narpra0132b90462018-09-13 11:07:48 +01001233}
1234
saoste019292aa32019-01-08 13:55:59 +00001235bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1236 const TensorInfo& input1,
1237 const TensorInfo& output,
1238 Optional<std::string&> reasonIfUnsupported) const
1239{
1240 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
1241 reasonIfUnsupported,
1242 input0,
1243 input1,
1244 output);
1245}
1246
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001247bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1248 const TensorInfo& input1,
1249 const TensorInfo& output,
1250 Optional<std::string&> reasonIfUnsupported) const
1251{
1252 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
1253 reasonIfUnsupported,
1254 input0,
1255 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001256 output,
1257 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001258}
1259
1260bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1261 const TensorInfo& output,
1262 const NormalizationDescriptor& descriptor,
1263 Optional<std::string&> reasonIfUnsupported) const
1264{
1265 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1266}
1267
1268bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
1269 Optional<std::string&> reasonIfUnsupported) const
1270{
Derek Lamberti901ea112019-12-10 22:07:09 +00001271 return IsClBackendSupported(reasonIfUnsupported, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001272}
1273
1274bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
1275 const TensorInfo& output,
1276 const PadDescriptor& descriptor,
1277 Optional<std::string&> reasonIfUnsupported) const
arovir01085f0a42018-10-08 14:48:19 +01001278{
1279 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
1280 reasonIfUnsupported,
1281 input,
1282 output,
1283 descriptor);
1284}
1285
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001286bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
1287 const TensorInfo& output,
1288 const PermuteDescriptor& descriptor,
1289 Optional<std::string&> reasonIfUnsupported) const
1290{
Matthew Bentham9820d302019-11-27 17:24:47 +00001291 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
telsoa014fcda012018-03-09 14:13:49 +00001292}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001293
1294bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1295 const TensorInfo& output,
1296 const Pooling2dDescriptor& descriptor,
1297 Optional<std::string&> reasonIfUnsupported) const
1298{
1299 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1300}
1301
Ryan OSheabab8fa92022-03-09 10:29:02 +00001302bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
1303 const TensorInfo& output,
1304 const Pooling3dDescriptor& descriptor,
1305 Optional<std::string&> reasonIfUnsupported) const
1306{
1307 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1308}
1309
Nikhil Raj91e4c6d2019-07-05 12:22:58 +01001310bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
1311 const armnn::TensorInfo &alpha,
1312 const armnn::TensorInfo &output,
1313 armnn::Optional<std::string &> reasonIfUnsupported) const
1314{
1315 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1316}
1317
Ryan OShea2323af42020-05-13 16:36:19 +01001318bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
1319 const TensorInfo& previousOutputIn,
1320 const TensorInfo& previousCellStateIn,
1321 const TensorInfo& outputStateOut,
1322 const TensorInfo& cellStateOut,
1323 const TensorInfo& output,
1324 const QLstmDescriptor& descriptor,
1325 const LstmInputParamsInfo& paramsInfo,
1326 Optional<std::string&> reasonIfUnsupported) const
1327{
1328 if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1329 previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1330 previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1331 outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1332 cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1333 output.GetDataType() == armnn::DataType::QAsymmS8)
1334 {
1335 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
1336 reasonIfUnsupported,
1337 input,
1338 previousCellStateIn,
1339 previousOutputIn,
1340 cellStateOut,
1341 outputStateOut,
1342 output,
1343 descriptor,
1344 paramsInfo);
1345 }
1346 else
1347 {
1348 return false;
1349 }
1350}
1351
Ferran Balaguer737d9ff2019-08-01 09:58:08 +01001352bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
1353 const TensorInfo& previousCellStateIn,
1354 const TensorInfo& previousOutputIn,
1355 const TensorInfo& cellStateOut,
1356 const TensorInfo& output,
1357 const QuantizedLstmInputParamsInfo& paramsInfo,
1358 Optional<std::string&> reasonIfUnsupported) const
1359{
1360 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
1361 reasonIfUnsupported,
1362 input,
1363 previousCellStateIn,
1364 previousOutputIn,
1365 cellStateOut,
1366 output,
1367 paramsInfo);
1368}
1369
Sadik Armagan20ec2492019-05-31 09:09:44 +01001370bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1371 const TensorInfo& output,
1372 Optional<std::string&> reasonIfUnsupported) const
1373{
1374 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
1375 reasonIfUnsupported,
1376 input,
1377 output);
1378}
1379
Sadik Armagana2747482021-02-09 10:28:54 +00001380bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
1381 const TensorInfo& output,
1382 const ReduceDescriptor& descriptor,
1383 Optional<std::string&> reasonIfUnsupported) const
1384{
1385 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
1386 reasonIfUnsupported,
1387 input,
1388 output,
1389 descriptor);
1390}
1391
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001392bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Kevin Maya023c402019-12-12 17:28:05 +00001393 const TensorInfo& output,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +00001394 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001395 Optional<std::string&> reasonIfUnsupported) const
1396{
Jan Eilers8eb25602020-03-09 12:13:48 +00001397 IgnoreUnused(descriptor);
Kevin Maya023c402019-12-12 17:28:05 +00001398 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001399}
1400
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001401bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
1402 const TensorInfo& output,
1403 const ResizeDescriptor& descriptor,
1404 Optional<std::string&> reasonIfUnsupported) const
1405{
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +01001406 FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001407}
1408
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +00001409bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
1410 const TensorInfo& output,
1411 const SliceDescriptor& descriptor,
1412 Optional<std::string&> reasonIfUnsupported) const
1413{
1414 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1415}
1416
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001417bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1418 const TensorInfo& output,
1419 const SoftmaxDescriptor& descriptor,
1420 Optional<std::string&> reasonIfUnsupported) const
1421{
Francis Murtagh3b938352019-07-26 15:44:17 +01001422 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001423}
1424
Sadik Armaganf4464322018-12-20 16:19:12 +00001425bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1426 const TensorInfo& output,
1427 const SpaceToBatchNdDescriptor& descriptor,
1428 Optional<std::string&> reasonIfUnsupported) const
1429{
1430 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
1431 reasonIfUnsupported,
1432 input,
1433 output,
1434 descriptor);
1435}
1436
James Conroyd2aa85e2019-07-01 17:12:40 +01001437bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1438 const TensorInfo& output,
1439 const SpaceToDepthDescriptor& descriptor,
1440 Optional<std::string&> reasonIfUnsupported) const
1441{
1442 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
1443 reasonIfUnsupported,
1444 input,
1445 output,
1446 descriptor);
1447}
1448
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001449bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +01001450 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
1451 const ViewsDescriptor& descriptor,
1452 Optional<std::string&> reasonIfUnsupported) const
1453{
Narumol Prangnawarat74135832019-05-23 15:07:33 +01001454#if defined(ARMCOMPUTECL_ENABLED)
1455 // Split along the last dimension, cannot use sub-tensors
1456 // as width and height of the sub-tensors do not match
1457 // the width and height of the parent tensor
1458 // in case of input with more than 2D.
1459 std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
1460 if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
1461 *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
1462 {
1463 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
1464 reasonIfUnsupported,
1465 input,
1466 outputs,
1467 *splitAxis.begin());
1468 }
1469#endif
Jan Eilers8eb25602020-03-09 12:13:48 +00001470 IgnoreUnused(descriptor);
Narumol Prangnawarat74135832019-05-23 15:07:33 +01001471 for (auto output : outputs)
1472 {
1473 if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
1474 {
1475 SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
1476 return false;
1477 }
1478 }
1479 return true;
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +01001480}
1481
Matthew Jacksond5166102019-07-31 14:06:28 +01001482bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1483 const TensorInfo& output,
1484 const StackDescriptor& descriptor,
1485 Optional<std::string&> reasonIfUnsupported) const
1486{
1487 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
1488 reasonIfUnsupported,
1489 inputs,
1490 output,
1491 descriptor);
1492}
1493
keidav01d74dc912018-12-10 18:16:07 +00001494bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1495 const TensorInfo& output,
1496 const StridedSliceDescriptor& descriptor,
1497 Optional<std::string&> reasonIfUnsupported) const
1498{
1499 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
1500 reasonIfUnsupported,
1501 input,
1502 output,
1503 descriptor);
1504}
1505
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001506bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1507 const TensorInfo& input1,
1508 const TensorInfo& output,
1509 Optional<std::string&> reasonIfUnsupported) const
1510{
1511 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
1512 reasonIfUnsupported,
1513 input0,
1514 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001515 output,
1516 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001517}
1518
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +01001519bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
1520 const TensorInfo& output,
1521 const TransposeConvolution2dDescriptor& descriptor,
1522 const TensorInfo& weights,
1523 const Optional<TensorInfo>& biases,
1524 Optional<std::string&> reasonIfUnsupported) const
1525{
1526 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
1527 reasonIfUnsupported,
1528 input,
1529 output,
1530 descriptor,
1531 weights,
1532 biases);
1533}
1534
Mike Kellyc9ea45a2020-02-28 18:11:58 +00001535bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
1536 const TensorInfo& output,
1537 const TransposeDescriptor& descriptor,
1538 Optional<std::string&> reasonIfUnsupported) const
1539{
1540 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1541}
1542
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001543bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1544 const TensorInfo& outputStateIn,
1545 const TensorInfo& cellStateIn,
Mike Kelly12994962022-04-21 11:57:09 +01001546 const TensorInfo& outputStateOut,
1547 const TensorInfo& cellStateOut,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001548 const TensorInfo& output,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001549 const UnidirectionalSequenceLstmDescriptor& descriptor,
1550 const LstmInputParamsInfo& paramsInfo,
1551 Optional<std::string&> reasonIfUnsupported) const
1552{
1553 FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
1554 reasonIfUnsupported,
1555 input,
1556 outputStateIn,
1557 cellStateIn,
Mike Kelly12994962022-04-21 11:57:09 +01001558 outputStateOut,
1559 cellStateOut,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001560 output,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001561 descriptor,
1562 paramsInfo);
1563}
1564
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001565} // namespace armnn