blob: ff2b576f3d4eca81901722f611fb188d5c644fc1 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Mike Kelly3ec30772023-03-08 13:47:17 +00002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
Teresa Charlin94916a52022-10-19 08:48:07 +010025#include "workloads/ClBatchMatMulWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010026#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000027#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010028#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010029#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000030#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010031#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010032#include "workloads/ClConvertFp16ToFp32Workload.hpp"
33#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010034#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010035#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010036#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010037#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010038#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010039#include "workloads/ClDivisionWorkload.hpp"
John Mcloughlin34c1c382023-05-17 15:08:36 +010040#include "workloads/ClElementwiseBinaryWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010041#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010042#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000043#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010044#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010045#include "workloads/ClGatherWorkload.hpp"
Teresa Charlin989e2f62022-04-27 16:26:11 +010046#include "workloads/ClGatherNdWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010047#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010048#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010049#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010050#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000051#include "workloads/ClLogicalAndWorkload.hpp"
52#include "workloads/ClLogicalNotWorkload.hpp"
53#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010054#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000055#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010056#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010057#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000058#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010059#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000060#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010061#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010062#include "workloads/ClPadWorkload.hpp"
63#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010064#include "workloads/ClPooling2dWorkload.hpp"
Ryan OSheabab8fa92022-03-09 10:29:02 +000065#include "workloads/ClPooling3dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010066#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010067#include "workloads/ClQLstmWorkload.hpp"
68#include "workloads/ClQuantizedLstmWorkload.hpp"
69#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000070#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000071#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010072#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010073#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010074#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000075#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010076#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000077#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010078#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010079#include "workloads/ClSplitterWorkload.hpp"
Teresa Charlinaac61122022-05-05 16:11:36 +010080#include "workloads/ClSqrtWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010081#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000082#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010083#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010084#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000085#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000086#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000087#endif
88
telsoa014fcda012018-03-09 14:13:49 +000089
90namespace armnn
91{
arovir017c22c702018-10-09 11:16:46 +010092
telsoa014fcda012018-03-09 14:13:49 +000093namespace
94{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010095
telsoa014fcda012018-03-09 14:13:49 +000096template<unsigned int FilterSize>
97bool IsMatchingSize2d(const TensorInfo& weightInfo)
98{
telsoa01c577f2c2018-08-31 09:22:23 +010099 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +0000100 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
101}
102
// Base case: a single candidate stride matches only if it equals the
// actual stride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// Recursive case: the actual stride is valid if it matches any of the
// candidate strides in the pack.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +0000114
// Reports whether the CL backend is compiled in at all.
// Accepts and ignores any trailing arguments so it can stand in for any
// Is*Supported signature; when CL is disabled it writes a fixed reason
// string into reasonIfUnsupported (if the caller provided one).
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
129
// With CL compiled in, evaluate the given support expression directly;
// otherwise fall back to IsClBackendSupported, which fails with the
// "built without CL support" reason. Relies on a variable named
// reasonIfUnsupported being in scope at the expansion site.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
135
#if defined(ARMCOMPUTECL_ENABLED)
// Invokes an Arm Compute validate() function and converts its
// arm_compute::Status into a bool; on failure the ACL error description
// is copied into reasonIfUnsupported (if provided).
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: this macro expands to a *return* statement, so any code placed
// after it in the calling function is unreachable.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
155
// Data-type based support dispatch for the CL backend: both float data
// types route to floatFuncPtr (passed twice below), uint8 routes to
// uint8FuncPtr, and the two FalseFunc slots unconditionally reject the
// remaining data types handled by IsSupportedForDataTypeGeneric.
// Fails outright when the backend itself is unavailable.
// NOTE(review): argument order must match IsSupportedForDataTypeGeneric's
// parameter list exactly — verify against LayerSupportCommon.hpp.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         floatFuncPtr,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<>,
                                         &FalseFunc<>,
                                         std::forward<Params>(params)...);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100173} // anonymous namespace
174
// Constructs a layer-support object bound to backend-specific model
// options carried by the given model context (may be null).
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

// Default construction: no backend-specific model context is available.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
184
// Single entry point for layer-support queries against the GPU (CL)
// backend. Dispatches on LayerType to the matching Is*Supported method.
//
// Conventions used throughout:
//  - infos packs the layer's tensors as {inputs..., outputs...}; for
//    convolution-style layers the expected format is
//    {input, output, weights, biases} and a default-constructed
//    TensorInfo in the biases slot means "no bias".
//  - descriptor is downcast to the layer-specific descriptor type.
//  - lstmParamsInfo / quantizedLstmParamsInfo are only read for the
//    LSTM-family cases, where .value() assumes the caller supplied them.
//  - The ARMNN_NO_DEPRECATE_WARN_BEGIN/END pairs silence warnings for
//    the deprecated standalone arithmetic layers (Addition, Division,
//    Maximum, Minimum, Multiplication, Subtraction), which are kept for
//    backward compatibility alongside LayerType::ElementwiseBinary.
bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchMatMul:
            return IsBatchMatMulSupported(infos[0],
                                          infos[1],
                                          infos[2],
                                          *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
                                          reasonIfUnsupported);
        case LayerType::BatchNormalization:
            // infos: {input, output, mean, variance, beta, gamma}.
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            // All entries except the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the biases slot means no bias.
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::ElementwiseBinary:
        {
            auto desc = *(PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor));

            // Each FORWARD_WORKLOAD_VALIDATE_FUNC expands to a return
            // statement, so there is no fall-through between these cases.
            // The trailing nullptr arguments are the (absent) fused
            // ActivationDescriptor.
            switch (desc.m_Operation)
            {
                case BinaryOperation::Add:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Div:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Minimum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Maximum:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2]);
                case BinaryOperation::Mul:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                case BinaryOperation::Power:
                case BinaryOperation::SqDiff:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClElementwiseBinaryValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   desc,
                                                   nullptr);
                case BinaryOperation::Sub:
                    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                                   reasonIfUnsupported,
                                                   infos[0],
                                                   infos[1],
                                                   infos[2],
                                                   nullptr);
                default:
                    return false;
            }
        }
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            // infos: {input, output, weights, biases}.
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            // Assumes lstmParamsInfo was supplied by the caller (.value()
            // would throw otherwise).
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Multiplication:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // First entry is the input; all remaining entries are outputs.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            // All entries except the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            ARMNN_NO_DEPRECATE_WARN_BEGIN
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
            ARMNN_NO_DEPRECATE_WARN_END
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                            UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // Layers not supported in cl by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch
            // (Pooling3d IS handled above — the old comment listing it
            // here was stale.)
            return false;
    }
}
683
// Queries the Arm Compute CL activation workload's validate() to decide
// support. The macro below expands to a return statement.
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
695
// Queries ClAdditionValidate for elementwise-add support.
// The trailing nullptr is the (absent) fused ActivationDescriptor.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
708
// Queries the CL ArgMinMax workload's validate() to decide support.
bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
721
// Queries ClBatchMatMulValidate for batched matrix-multiply support.
// The trailing nullptr is the (absent) fused ActivationDescriptor.
bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
                                            const TensorInfo& inputY,
                                            const TensorInfo& output,
                                            const BatchMatMulDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
                                   reasonIfUnsupported,
                                   inputX,
                                   inputY,
                                   output,
                                   descriptor,
                                   nullptr);
}
736
// Queries ClBatchNormalizationValidate for batch-norm support, passing
// all four statistics tensors. The trailing nullptr is the (absent)
// fused ActivationDescriptor.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
757
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100758bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
759 const TensorInfo& output,
760 const BatchToSpaceNdDescriptor& descriptor,
761 Optional<std::string&> reasonIfUnsupported) const
762{
763 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
764 reasonIfUnsupported,
765 input,
766 output,
767 descriptor);
768}
769
Sadik Armaganf40d6d42021-04-22 09:12:11 +0100770bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
771 const TensorInfo& output,
772 Optional<std::string&> reasonIfUnsupported) const
773{
774 FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
775 reasonIfUnsupported,
776 input,
777 output);
778}
779
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100780bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
Mike Kelly831faed2018-11-28 11:52:08 +0000781 const TensorInfo& output,
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100782 const ChannelShuffleDescriptor& descriptor,
Mike Kelly831faed2018-11-28 11:52:08 +0000783 Optional<std::string&> reasonIfUnsupported) const
784{
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100785 FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
Mike Kelly831faed2018-11-28 11:52:08 +0000786 reasonIfUnsupported,
787 input,
788 output,
789 descriptor);
790}
791
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100792bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
793 const TensorInfo& input1,
794 const TensorInfo& output,
795 const ComparisonDescriptor& descriptor,
796 Optional<std::string&> reasonIfUnsupported) const
797{
Teresa Charlin2b030d92020-03-27 16:40:56 +0000798 FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
799 reasonIfUnsupported,
800 input0,
801 input1,
802 output,
803 descriptor);
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100804}
805
Jim Flynn906f9462019-05-10 13:55:21 +0100806bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
807 const TensorInfo& output,
Cathal Corbett34b429c2021-12-24 12:24:40 +0000808 const OriginsDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100809 Optional<std::string&> reasonIfUnsupported) const
810{
Jim Flynne242f2d2019-05-22 14:24:13 +0100811 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
812 {
813 SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
814 return false;
815 }
816
817 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
818 if(concatInnerAxis < 3) // Width, height, or channels
819 {
820 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
821 reasonIfUnsupported,
822 inputs,
823 output,
824 descriptor);
825 }
826 else if (concatInnerAxis == 3)
827 {
828 // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
829 // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
830 for (auto& input : inputs)
831 {
832 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
833 {
834 SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
835 return false;
836 }
837 }
838 return true; // Sub-tensors support concat along batch
839 }
840 else // > 4 dimensions not supported.
841 {
842 SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
843 return false;
844 }
Jim Flynn906f9462019-05-10 13:55:21 +0100845}
846
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100847bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
848 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000849{
Mike Kelly0886ac42020-04-27 09:55:40 +0100850 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
851 reasonIfUnsupported,
852 output);
telsoa014fcda012018-03-09 14:13:49 +0000853}
854
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100855bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
856 const TensorInfo& output,
857 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000858{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100859 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
860 reasonIfUnsupported,
861 input,
862 output);
telsoa014fcda012018-03-09 14:13:49 +0000863}
864
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100865bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
866 const TensorInfo& output,
867 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000868{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100869 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
870 reasonIfUnsupported,
871 input,
872 output);
telsoa014fcda012018-03-09 14:13:49 +0000873}
874
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100875bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
876 const TensorInfo& output,
877 const Convolution2dDescriptor& descriptor,
878 const TensorInfo& weights,
879 const Optional<TensorInfo>& biases,
880 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000881{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100882 bool isFastMathEnabled = false;
883#if defined(ARMCOMPUTECL_ENABLED)
884 if (m_ModelContextPtr)
885 {
886 if (m_ModelContextPtr.get() != nullptr)
887 {
888 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
889 if (modelOptions)
890 {
891 isFastMathEnabled = modelOptions->IsFastMathEnabled();
892 }
893 }
894 }
895#endif
896
surmeh013537c2c2018-05-18 16:31:43 +0100897 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
898 reasonIfUnsupported,
899 input,
900 output,
901 descriptor,
902 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100903 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000904 isFastMathEnabled,
905 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000906}
907
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100908bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
909 const TensorInfo& output,
910 const Convolution3dDescriptor& descriptor,
911 const TensorInfo& weights,
912 const Optional<TensorInfo>& biases,
913 Optional<std::string&> reasonIfUnsupported) const
914{
915 bool isFastMathEnabled = false;
916#if defined(ARMCOMPUTECL_ENABLED)
917 if (m_ModelContextPtr)
918{
919 if (m_ModelContextPtr.get() != nullptr)
920 {
921 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
922 if (modelOptions)
923 {
924 isFastMathEnabled = modelOptions->IsFastMathEnabled();
925 }
926 }
927}
928#endif
929
930 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
931 reasonIfUnsupported,
932 input,
933 output,
934 descriptor,
935 weights,
936 biases,
937 isFastMathEnabled,
938 nullptr);
939}
940
Jim Flynn983daec2019-05-29 16:20:16 +0100941bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
942 const TensorInfo& output,
943 Optional<std::string&> reasonIfUnsupported) const
944{
945 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
946 reasonIfUnsupported,
947 input,
948 output);
949}
950
Aron Virginas-Tarb2801962019-09-30 11:24:53 +0100951bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
952 const TensorInfo& output,
953 const DepthToSpaceDescriptor& descriptor,
954 Optional<std::string&> reasonIfUnsupported) const
955{
956 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
957 reasonIfUnsupported,
958 input,
959 output,
960 descriptor);
961}
962
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100963bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
964 const TensorInfo& output,
965 const DepthwiseConvolution2dDescriptor& descriptor,
966 const TensorInfo& weights,
967 const Optional<TensorInfo>& biases,
968 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000969{
telsoa01c577f2c2018-08-31 09:22:23 +0100970 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
971 reasonIfUnsupported,
972 input,
973 output,
974 descriptor,
975 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000976 biases,
977 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000978}
979
Pablo Tellof0bd6832019-04-26 17:58:13 +0100980bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
981 const TensorInfo& output,
982 const DepthwiseConvolution2dDescriptor& descriptor,
983 const TensorInfo& weights,
984 const Optional<TensorInfo>& biases,
985 Optional<std::string&> reasonIfUnsupported) const
986{
987 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
988 reasonIfUnsupported,
989 input,
990 output,
991 descriptor,
992 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000993 biases,
994 nullptr);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100995}
996
997
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100998bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
999 const TensorInfo& input1,
1000 const TensorInfo& output,
1001 Optional<std::string&> reasonIfUnsupported) const
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001002{
1003 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
1004 reasonIfUnsupported,
1005 input0,
1006 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001007 output,
1008 nullptr);
Francis Murtaghe7a86a42018-08-29 12:42:10 +01001009}
1010
// Decides whether the CL backend can run an elementwise unary layer for the
// operation selected in the descriptor.
// NOTE(review): the cases below have no 'break' — this relies on
// FORWARD_WORKLOAD_VALIDATE_FUNC (LayerSupportCommon.hpp) expanding to a
// 'return' statement; otherwise every case would fall through. Confirm before
// restructuring.
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            // Unary operations without a CL workload are unsupported.
            return false;
    }
}
1062
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001063bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
1064 const TensorInfo& output,
1065 const FillDescriptor& descriptor,
1066 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +01001067{
Teresa Charlin4b10fef2020-07-29 09:36:41 +01001068 armnn::IgnoreUnused(input);
1069 armnn::IgnoreUnused(output);
1070 armnn::IgnoreUnused(descriptor);
1071
1072 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +01001073}
1074
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001075bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
1076 const TensorInfo& output,
1077 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +00001078{
Sadik Armagan9be49162019-10-30 16:15:26 +00001079 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
1080 reasonIfUnsupported,
1081 input,
1082 output);
telsoa01c577f2c2018-08-31 09:22:23 +01001083}
1084
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001085bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
1086 const TensorInfo& output,
1087 const TensorInfo& weights,
1088 const TensorInfo& biases,
1089 const FullyConnectedDescriptor& descriptor,
1090 Optional<std::string&> reasonIfUnsupported) const
1091{
1092 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
1093 reasonIfUnsupported,
1094 input,
1095 output,
1096 weights,
1097 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +00001098 descriptor,
1099 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001100}
1101
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001102bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
1103 const TensorInfo& input1,
1104 const TensorInfo& output,
Teresa Charlin52664732020-06-29 16:27:03 +01001105 const GatherDescriptor& descriptor,
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001106 Optional<std::string&> reasonIfUnsupported) const
1107{
1108 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
1109 reasonIfUnsupported,
1110 input0,
1111 input1,
Teresa Charlin52664732020-06-29 16:27:03 +01001112 output,
1113 descriptor);
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001114}
1115
Teresa Charlin989e2f62022-04-27 16:26:11 +01001116bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
1117 const TensorInfo& input1,
1118 const TensorInfo& output,
1119 Optional<std::string&> reasonIfUnsupported) const
1120{
1121 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
1122 reasonIfUnsupported,
1123 input0,
1124 input1,
1125 output);
1126}
1127
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001128bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
1129 Optional<std::string&> reasonIfUnsupported) const
1130{
Derek Lamberti901ea112019-12-10 22:07:09 +00001131 return IsClBackendSupported(reasonIfUnsupported, input);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001132}
1133
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01001134bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1135 const TensorInfo& output,
1136 const InstanceNormalizationDescriptor& descriptor,
1137 Optional<std::string&> reasonIfUnsupported) const
1138{
1139 FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
1140 reasonIfUnsupported,
1141 input,
1142 output,
1143 descriptor);
1144}
1145
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001146bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1147 const TensorInfo& output,
1148 const L2NormalizationDescriptor& descriptor,
1149 Optional<std::string&> reasonIfUnsupported) const
1150{
1151 FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
1152 reasonIfUnsupported,
1153 input,
1154 output,
1155 descriptor);
1156}
1157
// Decides whether the CL backend can run a logical binary layer (And/Or,
// selected by the descriptor).
// NOTE(review): as in IsElementwiseUnarySupported, the cases have no 'break'
// and rely on FORWARD_WORKLOAD_VALIDATE_FUNC expanding to a 'return'
// statement (LayerSupportCommon.hpp).
bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    // 'output' is forwarded to the validate functions below; this silences the
    // unused-parameter warning for build configs where the macro discards it.
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            // Logical operations without a CL workload are unsupported.
            return false;
    }
}
1184
1185
Teresa Charlin8398edc2020-07-20 14:23:02 +01001186bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1187 const TensorInfo& output,
1188 const LogSoftmaxDescriptor& descriptor,
1189 Optional<std::string&> reasonIfUnsupported) const
1190{
1191 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
1192 reasonIfUnsupported,
1193 input,
1194 output,
1195 descriptor);
1196}
1197
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001198bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
1199 const TensorInfo& outputStateIn,
1200 const TensorInfo& cellStateIn,
1201 const TensorInfo& scratchBuffer,
1202 const TensorInfo& outputStateOut,
1203 const TensorInfo& cellStateOut,
1204 const TensorInfo& output,
1205 const LstmDescriptor& descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001206 const LstmInputParamsInfo& paramsInfo,
1207 Optional<std::string&> reasonIfUnsupported) const
telsoa01c577f2c2018-08-31 09:22:23 +01001208{
arovir01085f0a42018-10-08 14:48:19 +01001209 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
1210 reasonIfUnsupported,
1211 input,
1212 outputStateIn,
1213 cellStateIn,
1214 scratchBuffer,
1215 outputStateOut,
1216 cellStateOut,
1217 output,
1218 descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001219 paramsInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001220}
1221
keidav01a959ee52018-12-19 10:04:58 +00001222bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1223 const TensorInfo& input1,
1224 const TensorInfo& output,
1225 Optional<std::string&> reasonIfUnsupported) const
1226{
1227 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
1228 reasonIfUnsupported,
1229 input0,
1230 input1,
1231 output);
1232}
1233
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001234bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
1235 const TensorInfo& output,
1236 const MeanDescriptor& descriptor,
1237 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +01001238{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01001239 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
1240 reasonIfUnsupported,
1241 input,
1242 output,
1243 descriptor);
narpra0132b90462018-09-13 11:07:48 +01001244}
1245
saoste019292aa32019-01-08 13:55:59 +00001246bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1247 const TensorInfo& input1,
1248 const TensorInfo& output,
1249 Optional<std::string&> reasonIfUnsupported) const
1250{
1251 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
1252 reasonIfUnsupported,
1253 input0,
1254 input1,
1255 output);
1256}
1257
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001258bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1259 const TensorInfo& input1,
1260 const TensorInfo& output,
1261 Optional<std::string&> reasonIfUnsupported) const
1262{
1263 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
1264 reasonIfUnsupported,
1265 input0,
1266 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001267 output,
1268 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001269}
1270
1271bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1272 const TensorInfo& output,
1273 const NormalizationDescriptor& descriptor,
1274 Optional<std::string&> reasonIfUnsupported) const
1275{
1276 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1277}
1278
1279bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
1280 Optional<std::string&> reasonIfUnsupported) const
1281{
Derek Lamberti901ea112019-12-10 22:07:09 +00001282 return IsClBackendSupported(reasonIfUnsupported, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001283}
1284
1285bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
1286 const TensorInfo& output,
1287 const PadDescriptor& descriptor,
1288 Optional<std::string&> reasonIfUnsupported) const
arovir01085f0a42018-10-08 14:48:19 +01001289{
1290 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
1291 reasonIfUnsupported,
1292 input,
1293 output,
1294 descriptor);
1295}
1296
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001297bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
1298 const TensorInfo& output,
1299 const PermuteDescriptor& descriptor,
1300 Optional<std::string&> reasonIfUnsupported) const
1301{
Matthew Bentham9820d302019-11-27 17:24:47 +00001302 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
telsoa014fcda012018-03-09 14:13:49 +00001303}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001304
1305bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1306 const TensorInfo& output,
1307 const Pooling2dDescriptor& descriptor,
1308 Optional<std::string&> reasonIfUnsupported) const
1309{
1310 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1311}
1312
Ryan OSheabab8fa92022-03-09 10:29:02 +00001313bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
1314 const TensorInfo& output,
1315 const Pooling3dDescriptor& descriptor,
1316 Optional<std::string&> reasonIfUnsupported) const
1317{
1318 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1319}
1320
Nikhil Raj91e4c6d2019-07-05 12:22:58 +01001321bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
1322 const armnn::TensorInfo &alpha,
1323 const armnn::TensorInfo &output,
1324 armnn::Optional<std::string &> reasonIfUnsupported) const
1325{
1326 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1327}
1328
// Decides whether the CL backend can run a QLstm (quantized LSTM) layer.
// Only the QAsymmS8 activation / QSymmS16 cell-state combination is accepted;
// anything else is rejected without consulting ACL.
// NOTE(review): the argument order passed to ClQLstmWorkloadValidate
// deliberately differs from this method's parameter order (cell state before
// output state) — presumably it matches the validate function's signature;
// confirm against ClQLstmWorkload.hpp before reordering.
bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    // Gate the ACL query on the supported quantized data-type combination.
    if (input.GetDataType()               == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType()    == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType()      == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType()        == armnn::DataType::QSymmS16 &&
        output.GetDataType()              == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}
1362
// Decides whether the CL backend can run a QuantizedLstm layer by delegating
// to ClQuantizedLstmWorkloadValidate.
// NOTE(review): the cell-state/output argument order mirrors the validate
// function's signature, not this method's parameter order in other Lstm
// overloads — keep the order as-is.
bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}
1380
Sadik Armagan20ec2492019-05-31 09:09:44 +01001381bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1382 const TensorInfo& output,
1383 Optional<std::string&> reasonIfUnsupported) const
1384{
1385 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
1386 reasonIfUnsupported,
1387 input,
1388 output);
1389}
1390
Sadik Armagana2747482021-02-09 10:28:54 +00001391bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
1392 const TensorInfo& output,
1393 const ReduceDescriptor& descriptor,
1394 Optional<std::string&> reasonIfUnsupported) const
1395{
1396 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
1397 reasonIfUnsupported,
1398 input,
1399 output,
1400 descriptor);
1401}
1402
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001403bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Kevin Maya023c402019-12-12 17:28:05 +00001404 const TensorInfo& output,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +00001405 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001406 Optional<std::string&> reasonIfUnsupported) const
1407{
Jan Eilers8eb25602020-03-09 12:13:48 +00001408 IgnoreUnused(descriptor);
Kevin Maya023c402019-12-12 17:28:05 +00001409 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001410}
1411
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001412bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
1413 const TensorInfo& output,
1414 const ResizeDescriptor& descriptor,
1415 Optional<std::string&> reasonIfUnsupported) const
1416{
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +01001417 FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001418}
1419
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +00001420bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
1421 const TensorInfo& output,
1422 const SliceDescriptor& descriptor,
1423 Optional<std::string&> reasonIfUnsupported) const
1424{
1425 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1426}
1427
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001428bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1429 const TensorInfo& output,
1430 const SoftmaxDescriptor& descriptor,
1431 Optional<std::string&> reasonIfUnsupported) const
1432{
Francis Murtagh3b938352019-07-26 15:44:17 +01001433 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001434}
1435
Sadik Armaganf4464322018-12-20 16:19:12 +00001436bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1437 const TensorInfo& output,
1438 const SpaceToBatchNdDescriptor& descriptor,
1439 Optional<std::string&> reasonIfUnsupported) const
1440{
1441 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
1442 reasonIfUnsupported,
1443 input,
1444 output,
1445 descriptor);
1446}
1447
James Conroyd2aa85e2019-07-01 17:12:40 +01001448bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1449 const TensorInfo& output,
1450 const SpaceToDepthDescriptor& descriptor,
1451 Optional<std::string&> reasonIfUnsupported) const
1452{
1453 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
1454 reasonIfUnsupported,
1455 input,
1456 output,
1457 descriptor);
1458}
1459
// Decides whether the CL backend can run a Splitter layer.
// When ACL is compiled in and the split happens along the last dimension of a
// >2D tensor, the decision is delegated to ClSplitterWorkloadValidate (the
// macro returns from this function). In every other case the split is
// implemented via sub-tensors, which only works when all outputs share type
// space with the input.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // NOTE: the macro expands to a return, so execution only continues
        // past this block when the condition above is false.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Needed for builds without ARMCOMPUTECL_ENABLED, where 'descriptor' is
    // otherwise unreferenced.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
1492
Matthew Jacksond5166102019-07-31 14:06:28 +01001493bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1494 const TensorInfo& output,
1495 const StackDescriptor& descriptor,
1496 Optional<std::string&> reasonIfUnsupported) const
1497{
1498 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
1499 reasonIfUnsupported,
1500 inputs,
1501 output,
1502 descriptor);
1503}
1504
keidav01d74dc912018-12-10 18:16:07 +00001505bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1506 const TensorInfo& output,
1507 const StridedSliceDescriptor& descriptor,
1508 Optional<std::string&> reasonIfUnsupported) const
1509{
1510 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
1511 reasonIfUnsupported,
1512 input,
1513 output,
1514 descriptor);
1515}
1516
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001517bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1518 const TensorInfo& input1,
1519 const TensorInfo& output,
1520 Optional<std::string&> reasonIfUnsupported) const
1521{
1522 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
1523 reasonIfUnsupported,
1524 input0,
1525 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001526 output,
1527 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001528}
1529
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +01001530bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
1531 const TensorInfo& output,
1532 const TransposeConvolution2dDescriptor& descriptor,
1533 const TensorInfo& weights,
1534 const Optional<TensorInfo>& biases,
1535 Optional<std::string&> reasonIfUnsupported) const
1536{
1537 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
1538 reasonIfUnsupported,
1539 input,
1540 output,
1541 descriptor,
1542 weights,
1543 biases);
1544}
1545
Mike Kellyc9ea45a2020-02-28 18:11:58 +00001546bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
1547 const TensorInfo& output,
1548 const TransposeDescriptor& descriptor,
1549 Optional<std::string&> reasonIfUnsupported) const
1550{
1551 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1552}
1553
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001554bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1555 const TensorInfo& outputStateIn,
1556 const TensorInfo& cellStateIn,
Mike Kelly12994962022-04-21 11:57:09 +01001557 const TensorInfo& outputStateOut,
1558 const TensorInfo& cellStateOut,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001559 const TensorInfo& output,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001560 const UnidirectionalSequenceLstmDescriptor& descriptor,
1561 const LstmInputParamsInfo& paramsInfo,
1562 Optional<std::string&> reasonIfUnsupported) const
1563{
1564 FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
1565 reasonIfUnsupported,
1566 input,
1567 outputStateIn,
1568 cellStateIn,
Mike Kelly12994962022-04-21 11:57:09 +01001569 outputStateOut,
1570 cellStateOut,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001571 output,
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001572 descriptor,
1573 paramsInfo);
1574}
1575
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001576} // namespace armnn