blob: a61a5bb6401b5c74f07c296398723d31b84475e1 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Teresa Charlin8398edc2020-07-20 14:23:02 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010025#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000026#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010027#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010028#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000029#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010030#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010031#include "workloads/ClConvertFp16ToFp32Workload.hpp"
32#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010033#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010034#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010035#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010036#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010037#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010038#include "workloads/ClDivisionWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010039#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010040#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000041#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010042#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010043#include "workloads/ClGatherWorkload.hpp"
Teresa Charlin989e2f62022-04-27 16:26:11 +010044#include "workloads/ClGatherNdWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010045#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010046#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010047#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010048#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000049#include "workloads/ClLogicalAndWorkload.hpp"
50#include "workloads/ClLogicalNotWorkload.hpp"
51#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010052#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000053#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010054#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010055#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000056#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010057#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000058#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010059#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010060#include "workloads/ClPadWorkload.hpp"
61#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010062#include "workloads/ClPooling2dWorkload.hpp"
Ryan OSheabab8fa92022-03-09 10:29:02 +000063#include "workloads/ClPooling3dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010064#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010065#include "workloads/ClQLstmWorkload.hpp"
66#include "workloads/ClQuantizedLstmWorkload.hpp"
67#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000068#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000069#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010070#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010071#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010072#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000073#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010074#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000075#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010076#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010077#include "workloads/ClSplitterWorkload.hpp"
Teresa Charlinaac61122022-05-05 16:11:36 +010078#include "workloads/ClSqrtWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010079#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000080#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010081#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010082#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000083#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000084#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000085#endif
86
telsoa014fcda012018-03-09 14:13:49 +000087
88namespace armnn
89{
arovir017c22c702018-10-09 11:16:46 +010090
telsoa014fcda012018-03-09 14:13:49 +000091namespace
92{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010093
telsoa014fcda012018-03-09 14:13:49 +000094template<unsigned int FilterSize>
95bool IsMatchingSize2d(const TensorInfo& weightInfo)
96{
telsoa01c577f2c2018-08-31 09:22:23 +010097 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000098 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
99}
100
// True when the actual stride equals the single candidate stride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// True when the actual stride equals any of the candidate strides.
// Implemented as a single fold over the pack rather than pairwise recursion.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return (actualStride == FirstStride)
        || (actualStride == SecondStride)
        || (... || (actualStride == ValidStrides));
}
telsoa014fcda012018-03-09 14:13:49 +0000112
// Returns whether this build of Arm NN can run anything on the CL backend at
// all (i.e. whether it was compiled with ARMCOMPUTECL_ENABLED). In a CL-enabled
// build every argument is ignored and the answer is unconditionally true; in a
// CL-less build the reason string is populated and the answer is false.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
127
// Evaluates the given support expression when CL is compiled in; otherwise
// short-circuits to IsClBackendSupported, which fills reasonIfUnsupported
// (the macro expansion relies on a reasonIfUnsupported variable in scope).
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
133
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an ACL validate function and converts its arm_compute::Status into a
// bool, copying the ACL error description into reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// With CL enabled, a support query delegates to the named ACL validate
// function; without CL, every query fails with a "built without CL support"
// reason. Note the macro contains the enclosing function's return statement.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
153
telsoa01c577f2c2018-08-31 09:22:23 +0100154template<typename FloatFunc, typename Uint8Func, typename ... Params>
arovir01085f0a42018-10-08 14:48:19 +0100155bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
telsoa014fcda012018-03-09 14:13:49 +0000156 DataType dataType,
telsoa01c577f2c2018-08-31 09:22:23 +0100157 FloatFunc floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000158 Uint8Func uint8FuncPtr,
159 Params&&... params)
160{
161 return IsClBackendSupported(reasonIfUnsupported) &&
162 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
163 dataType,
164 floatFuncPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100165 floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000166 uint8FuncPtr,
narpra01db2b1602019-01-23 15:23:11 +0000167 &FalseFunc<>,
kevmay012b4d88e2019-01-24 14:05:09 +0000168 &FalseFunc<>,
telsoa014fcda012018-03-09 14:13:49 +0000169 std::forward<Params>(params)...);
170}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100171} // anonymous namespace
172
// Constructs the CL layer-support object with backend-specific model options;
// the context is consulted by option-dependent queries (e.g. the FastMath
// check in IsConvolution2dSupported).
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}
177
// Default constructor: no model context, so option-dependent checks fall back
// to their default behaviour.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
182
// Central support query for the CL (GpuAcc) backend: dispatches on the layer
// type to the matching Is<Layer>Supported overload.
//
// @param type       The layer kind being queried.
// @param infos      Input and output TensorInfos, flattened in a
//                   layer-specific order (e.g. convolutions expect
//                   {input, output, weights, biases}).
// @param descriptor Base-class reference, downcast per layer type to the
//                   concrete descriptor.
// @param lstmParamsInfo / quantizedLstmParamsInfo Extra parameter infos, read
//                   only by the LSTM-family cases (value() throws if unset).
// @param reasonIfUnsupported Optional out-string explaining a false result.
bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            // All infos but the last are inputs; the final info is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the bias slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the bias slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the bias slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::GatherNd:
            return IsGatherNdSupported(infos[0],
                                       infos[1],
                                       infos[2],
                                       reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            // Unconditionally supported.
            return true;
        case LayerType::MemCopy:
            // Delegated to the common base implementation.
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Multiplication:
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            // Unconditionally supported.
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            // Delegated to the common base implementation.
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // First info is the input; the remainder are the split outputs.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            // All infos but the last are inputs; the final info is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the bias slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                            UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            // Unconditionally supported.
            return true;
        default:
            // Layers not supported on the CL backend by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch
            // (Pooling3d is handled by its own case above.)
            return false;
    }
}
604
// Queries the ACL ClActivationWorkload validator to decide whether this
// activation configuration can run on the CL backend.
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
616
// Validates element-wise addition via the ACL addition validator.
// NOTE(review): the trailing nullptr appears to be an optional fused
// ActivationDescriptor (none fused here) — confirm against ClAdditionValidate.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
629
// Queries the ACL ArgMinMax validator for the given axis/function settings.
bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
642
// Validates batch normalization (input/output plus the four statistic/weight
// tensors) via ACL.
// NOTE(review): the trailing nullptr appears to be an optional fused
// ActivationDescriptor (none fused here) — confirm against the validator.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
663
// Queries the ACL BatchToSpaceNd validator for the given block/crop settings.
bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
675
// Queries the ACL Cast validator for the given input/output type pair.
bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
685
// Queries the ACL ChannelShuffle validator for the given group settings.
bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ChannelShuffleDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
697
// Queries the ACL Comparison validator for the given comparison operation.
bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
711
// Concat support on CL. Concat along the three innermost dimensions is
// delegated to the ACL concat validator; concat along the outermost (batch)
// dimension of a 4D tensor relies on the sub-tensor optimization, which
// requires every input to share the output's type and quantization space.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // The concat axis must name a real dimension.
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Axis re-expressed counting from the innermost dimension (0 = innermost).
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
752
// Queries the ACL Constant validator for the given output tensor.
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
760
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100761bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
762 const TensorInfo& output,
763 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000764{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100765 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
766 reasonIfUnsupported,
767 input,
768 output);
telsoa014fcda012018-03-09 14:13:49 +0000769}
770
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100771bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
772 const TensorInfo& output,
773 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000774{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100775 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
776 reasonIfUnsupported,
777 input,
778 output);
telsoa014fcda012018-03-09 14:13:49 +0000779}
780
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100781bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
782 const TensorInfo& output,
783 const Convolution2dDescriptor& descriptor,
784 const TensorInfo& weights,
785 const Optional<TensorInfo>& biases,
786 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000787{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100788 bool isFastMathEnabled = false;
789#if defined(ARMCOMPUTECL_ENABLED)
790 if (m_ModelContextPtr)
791 {
792 if (m_ModelContextPtr.get() != nullptr)
793 {
794 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
795 if (modelOptions)
796 {
797 isFastMathEnabled = modelOptions->IsFastMathEnabled();
798 }
799 }
800 }
801#endif
802
surmeh013537c2c2018-05-18 16:31:43 +0100803 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
804 reasonIfUnsupported,
805 input,
806 output,
807 descriptor,
808 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100809 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000810 isFastMathEnabled,
811 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000812}
813
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100814bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
815 const TensorInfo& output,
816 const Convolution3dDescriptor& descriptor,
817 const TensorInfo& weights,
818 const Optional<TensorInfo>& biases,
819 Optional<std::string&> reasonIfUnsupported) const
820{
821 bool isFastMathEnabled = false;
822#if defined(ARMCOMPUTECL_ENABLED)
823 if (m_ModelContextPtr)
824{
825 if (m_ModelContextPtr.get() != nullptr)
826 {
827 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
828 if (modelOptions)
829 {
830 isFastMathEnabled = modelOptions->IsFastMathEnabled();
831 }
832 }
833}
834#endif
835
836 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
837 reasonIfUnsupported,
838 input,
839 output,
840 descriptor,
841 weights,
842 biases,
843 isFastMathEnabled,
844 nullptr);
845}
846
Jim Flynn983daec2019-05-29 16:20:16 +0100847bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
848 const TensorInfo& output,
849 Optional<std::string&> reasonIfUnsupported) const
850{
851 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
852 reasonIfUnsupported,
853 input,
854 output);
855}
856
Aron Virginas-Tarb2801962019-09-30 11:24:53 +0100857bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
858 const TensorInfo& output,
859 const DepthToSpaceDescriptor& descriptor,
860 Optional<std::string&> reasonIfUnsupported) const
861{
862 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
863 reasonIfUnsupported,
864 input,
865 output,
866 descriptor);
867}
868
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100869bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
870 const TensorInfo& output,
871 const DepthwiseConvolution2dDescriptor& descriptor,
872 const TensorInfo& weights,
873 const Optional<TensorInfo>& biases,
874 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000875{
telsoa01c577f2c2018-08-31 09:22:23 +0100876 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
877 reasonIfUnsupported,
878 input,
879 output,
880 descriptor,
881 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000882 biases,
883 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000884}
885
Pablo Tellof0bd6832019-04-26 17:58:13 +0100886bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
887 const TensorInfo& output,
888 const DepthwiseConvolution2dDescriptor& descriptor,
889 const TensorInfo& weights,
890 const Optional<TensorInfo>& biases,
891 Optional<std::string&> reasonIfUnsupported) const
892{
893 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
894 reasonIfUnsupported,
895 input,
896 output,
897 descriptor,
898 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000899 biases,
900 nullptr);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100901}
902
903
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100904bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
905 const TensorInfo& input1,
906 const TensorInfo& output,
907 Optional<std::string&> reasonIfUnsupported) const
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100908{
909 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
910 reasonIfUnsupported,
911 input0,
912 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +0000913 output,
914 nullptr);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100915}
916
// Dispatches an ElementwiseUnary support query to the per-operation ACL
// validator selected by descriptor.m_Operation. Unrecognised operations are
// unsupported.
// NOTE(review): the switch cases have no break/return of their own — this
// relies on FORWARD_WORKLOAD_VALIDATE_FUNC expanding to a return statement
// (confirm against LayerSupportCommon.hpp).
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            // Any operation without a CL validator is unsupported.
            return false;
    }
}
968
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100969bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
970 const TensorInfo& output,
971 const FillDescriptor& descriptor,
972 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +0100973{
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100974 armnn::IgnoreUnused(input);
975 armnn::IgnoreUnused(output);
976 armnn::IgnoreUnused(descriptor);
977
978 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +0100979}
980
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100981bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
982 const TensorInfo& output,
983 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000984{
Sadik Armagan9be49162019-10-30 16:15:26 +0000985 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
986 reasonIfUnsupported,
987 input,
988 output);
telsoa01c577f2c2018-08-31 09:22:23 +0100989}
990
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100991bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
992 const TensorInfo& output,
993 const TensorInfo& weights,
994 const TensorInfo& biases,
995 const FullyConnectedDescriptor& descriptor,
996 Optional<std::string&> reasonIfUnsupported) const
997{
998 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
999 reasonIfUnsupported,
1000 input,
1001 output,
1002 weights,
1003 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +00001004 descriptor,
1005 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001006}
1007
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001008bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
1009 const TensorInfo& input1,
1010 const TensorInfo& output,
Teresa Charlin52664732020-06-29 16:27:03 +01001011 const GatherDescriptor& descriptor,
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001012 Optional<std::string&> reasonIfUnsupported) const
1013{
1014 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
1015 reasonIfUnsupported,
1016 input0,
1017 input1,
Teresa Charlin52664732020-06-29 16:27:03 +01001018 output,
1019 descriptor);
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001020}
1021
Teresa Charlin989e2f62022-04-27 16:26:11 +01001022bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
1023 const TensorInfo& input1,
1024 const TensorInfo& output,
1025 Optional<std::string&> reasonIfUnsupported) const
1026{
1027 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
1028 reasonIfUnsupported,
1029 input0,
1030 input1,
1031 output);
1032}
1033
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001034bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
1035 Optional<std::string&> reasonIfUnsupported) const
1036{
Derek Lamberti901ea112019-12-10 22:07:09 +00001037 return IsClBackendSupported(reasonIfUnsupported, input);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001038}
1039
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01001040bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1041 const TensorInfo& output,
1042 const InstanceNormalizationDescriptor& descriptor,
1043 Optional<std::string&> reasonIfUnsupported) const
1044{
1045 FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
1046 reasonIfUnsupported,
1047 input,
1048 output,
1049 descriptor);
1050}
1051
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001052bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1053 const TensorInfo& output,
1054 const L2NormalizationDescriptor& descriptor,
1055 Optional<std::string&> reasonIfUnsupported) const
1056{
1057 FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
1058 reasonIfUnsupported,
1059 input,
1060 output,
1061 descriptor);
1062}
1063
// Dispatches a LogicalBinary support query to the per-operation ACL
// validator selected by descriptor.m_Operation. Unrecognised operations are
// unsupported.
// NOTE(review): IgnoreUnused(output) looks redundant since both branches pass
// 'output' to the macro — presumably it silences an unused-parameter warning
// in the ARMCOMPUTECL-disabled build; confirm against the macro definition
// before removing.
bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    // The cases rely on FORWARD_WORKLOAD_VALIDATE_FUNC returning from this
    // function, so no break statements are needed.
    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            // Any operation without a CL validator is unsupported.
            return false;
    }
}
1090
1091
Teresa Charlin8398edc2020-07-20 14:23:02 +01001092bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1093 const TensorInfo& output,
1094 const LogSoftmaxDescriptor& descriptor,
1095 Optional<std::string&> reasonIfUnsupported) const
1096{
1097 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
1098 reasonIfUnsupported,
1099 input,
1100 output,
1101 descriptor);
1102}
1103
// Asks the ACL validator whether a (float) LSTM layer is supported on the CL
// backend. All state/output tensor infos plus the LSTM parameter infos are
// forwarded positionally, in the same order as the parameters are declared —
// keep the two orders in sync when changing this.
bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1127
keidav01a959ee52018-12-19 10:04:58 +00001128bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1129 const TensorInfo& input1,
1130 const TensorInfo& output,
1131 Optional<std::string&> reasonIfUnsupported) const
1132{
1133 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
1134 reasonIfUnsupported,
1135 input0,
1136 input1,
1137 output);
1138}
1139
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001140bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
1141 const TensorInfo& output,
1142 const MeanDescriptor& descriptor,
1143 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +01001144{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01001145 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
1146 reasonIfUnsupported,
1147 input,
1148 output,
1149 descriptor);
narpra0132b90462018-09-13 11:07:48 +01001150}
1151
saoste019292aa32019-01-08 13:55:59 +00001152bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1153 const TensorInfo& input1,
1154 const TensorInfo& output,
1155 Optional<std::string&> reasonIfUnsupported) const
1156{
1157 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
1158 reasonIfUnsupported,
1159 input0,
1160 input1,
1161 output);
1162}
1163
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001164bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1165 const TensorInfo& input1,
1166 const TensorInfo& output,
1167 Optional<std::string&> reasonIfUnsupported) const
1168{
1169 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
1170 reasonIfUnsupported,
1171 input0,
1172 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001173 output,
1174 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001175}
1176
1177bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1178 const TensorInfo& output,
1179 const NormalizationDescriptor& descriptor,
1180 Optional<std::string&> reasonIfUnsupported) const
1181{
1182 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1183}
1184
1185bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
1186 Optional<std::string&> reasonIfUnsupported) const
1187{
Derek Lamberti901ea112019-12-10 22:07:09 +00001188 return IsClBackendSupported(reasonIfUnsupported, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001189}
1190
1191bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
1192 const TensorInfo& output,
1193 const PadDescriptor& descriptor,
1194 Optional<std::string&> reasonIfUnsupported) const
arovir01085f0a42018-10-08 14:48:19 +01001195{
1196 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
1197 reasonIfUnsupported,
1198 input,
1199 output,
1200 descriptor);
1201}
1202
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001203bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
1204 const TensorInfo& output,
1205 const PermuteDescriptor& descriptor,
1206 Optional<std::string&> reasonIfUnsupported) const
1207{
Matthew Bentham9820d302019-11-27 17:24:47 +00001208 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
telsoa014fcda012018-03-09 14:13:49 +00001209}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001210
1211bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1212 const TensorInfo& output,
1213 const Pooling2dDescriptor& descriptor,
1214 Optional<std::string&> reasonIfUnsupported) const
1215{
1216 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1217}
1218
Ryan OSheabab8fa92022-03-09 10:29:02 +00001219bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
1220 const TensorInfo& output,
1221 const Pooling3dDescriptor& descriptor,
1222 Optional<std::string&> reasonIfUnsupported) const
1223{
1224 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1225}
1226
Nikhil Raj91e4c6d2019-07-05 12:22:58 +01001227bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
1228 const armnn::TensorInfo &alpha,
1229 const armnn::TensorInfo &output,
1230 armnn::Optional<std::string &> reasonIfUnsupported) const
1231{
1232 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1233}
1234
// Checks whether the CL backend supports a QLstm layer. Only the fully
// quantized QAsymmS8 activation / QSymmS16 cell-state combination is
// accepted; any other data-type combination is rejected without consulting
// ACL.
// NOTE(review): the validator is deliberately called with previousCellStateIn
// before previousOutputIn — the opposite of this function's parameter order,
// matching ClQLstmWorkloadValidate's signature (same ordering is used in
// IsQuantizedLstmSupported below); keep this ordering when editing.
bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType()               == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType()    == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType()      == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType()        == armnn::DataType::QSymmS16 &&
        output.GetDataType()              == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        // Unsupported data-type combination for quantized LSTM.
        return false;
    }
}
1268
// Asks the ACL validator whether a QuantizedLstm layer is supported on the
// CL backend. Arguments are forwarded positionally in declaration order —
// note cell state precedes previous output, as in IsQLstmSupported's
// validator call.
bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}
1286
Sadik Armagan20ec2492019-05-31 09:09:44 +01001287bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1288 const TensorInfo& output,
1289 Optional<std::string&> reasonIfUnsupported) const
1290{
1291 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
1292 reasonIfUnsupported,
1293 input,
1294 output);
1295}
1296
Sadik Armagana2747482021-02-09 10:28:54 +00001297bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
1298 const TensorInfo& output,
1299 const ReduceDescriptor& descriptor,
1300 Optional<std::string&> reasonIfUnsupported) const
1301{
1302 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
1303 reasonIfUnsupported,
1304 input,
1305 output,
1306 descriptor);
1307}
1308
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001309bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Kevin Maya023c402019-12-12 17:28:05 +00001310 const TensorInfo& output,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +00001311 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001312 Optional<std::string&> reasonIfUnsupported) const
1313{
Jan Eilers8eb25602020-03-09 12:13:48 +00001314 IgnoreUnused(descriptor);
Kevin Maya023c402019-12-12 17:28:05 +00001315 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001316}
1317
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001318bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
1319 const TensorInfo& output,
1320 const ResizeDescriptor& descriptor,
1321 Optional<std::string&> reasonIfUnsupported) const
1322{
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +01001323 FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001324}
1325
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +00001326bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
1327 const TensorInfo& output,
1328 const SliceDescriptor& descriptor,
1329 Optional<std::string&> reasonIfUnsupported) const
1330{
1331 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1332}
1333
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001334bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1335 const TensorInfo& output,
1336 const SoftmaxDescriptor& descriptor,
1337 Optional<std::string&> reasonIfUnsupported) const
1338{
Francis Murtagh3b938352019-07-26 15:44:17 +01001339 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001340}
1341
Sadik Armaganf4464322018-12-20 16:19:12 +00001342bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1343 const TensorInfo& output,
1344 const SpaceToBatchNdDescriptor& descriptor,
1345 Optional<std::string&> reasonIfUnsupported) const
1346{
1347 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
1348 reasonIfUnsupported,
1349 input,
1350 output,
1351 descriptor);
1352}
1353
James Conroyd2aa85e2019-07-01 17:12:40 +01001354bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1355 const TensorInfo& output,
1356 const SpaceToDepthDescriptor& descriptor,
1357 Optional<std::string&> reasonIfUnsupported) const
1358{
1359 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
1360 reasonIfUnsupported,
1361 input,
1362 output,
1363 descriptor);
1364}
1365
// Checks whether the CL backend can execute a Splitter (Split) layer.
// A split along the last dimension of a >2D tensor cannot use sub-tensors
// (sub-tensor width/height would not match the parent), so that case is
// validated by the ACL workload. Every other case relies on the sub-tensor
// optimization, which only requires each output to share the input's
// type/quantization space.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // The macro returns the ACL validation result for this case.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Needed when ARMCOMPUTECL is disabled and 'descriptor' is otherwise unread.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
1398
Matthew Jacksond5166102019-07-31 14:06:28 +01001399bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1400 const TensorInfo& output,
1401 const StackDescriptor& descriptor,
1402 Optional<std::string&> reasonIfUnsupported) const
1403{
1404 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
1405 reasonIfUnsupported,
1406 inputs,
1407 output,
1408 descriptor);
1409}
1410
keidav01d74dc912018-12-10 18:16:07 +00001411bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1412 const TensorInfo& output,
1413 const StridedSliceDescriptor& descriptor,
1414 Optional<std::string&> reasonIfUnsupported) const
1415{
1416 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
1417 reasonIfUnsupported,
1418 input,
1419 output,
1420 descriptor);
1421}
1422
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001423bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1424 const TensorInfo& input1,
1425 const TensorInfo& output,
1426 Optional<std::string&> reasonIfUnsupported) const
1427{
1428 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
1429 reasonIfUnsupported,
1430 input0,
1431 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001432 output,
1433 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001434}
1435
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +01001436bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
1437 const TensorInfo& output,
1438 const TransposeConvolution2dDescriptor& descriptor,
1439 const TensorInfo& weights,
1440 const Optional<TensorInfo>& biases,
1441 Optional<std::string&> reasonIfUnsupported) const
1442{
1443 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
1444 reasonIfUnsupported,
1445 input,
1446 output,
1447 descriptor,
1448 weights,
1449 biases);
1450}
1451
Mike Kellyc9ea45a2020-02-28 18:11:58 +00001452bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
1453 const TensorInfo& output,
1454 const TransposeDescriptor& descriptor,
1455 Optional<std::string&> reasonIfUnsupported) const
1456{
1457 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1458}
1459
// Asks the (float) ACL validator whether a UnidirectionalSequenceLstm layer
// is supported on the CL backend. Arguments are forwarded positionally in
// declaration order — per the blame history the outputStateOut/cellStateOut
// ordering was corrected in 2022, so keep the two orders in sync when
// editing.
bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                           const TensorInfo& outputStateIn,
                                                           const TensorInfo& cellStateIn,
                                                           const TensorInfo& outputStateOut,
                                                           const TensorInfo& cellStateOut,
                                                           const TensorInfo& output,
                                                           const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                           const LstmInputParamsInfo& paramsInfo,
                                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1481
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001482} // namespace armnn