blob: cb2d75603710a96ac48973e3c023d705bb839ea1 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Teresa Charlin8398edc2020-07-20 14:23:02 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
Teresa Charlin94916a52022-10-19 08:48:07 +010025#include "workloads/ClBatchMatMulWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010026#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000027#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010028#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010029#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000030#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010031#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010032#include "workloads/ClConvertFp16ToFp32Workload.hpp"
33#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010034#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010035#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010036#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010037#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010038#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010039#include "workloads/ClDivisionWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010040#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010041#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000042#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010043#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010044#include "workloads/ClGatherWorkload.hpp"
Teresa Charlin989e2f62022-04-27 16:26:11 +010045#include "workloads/ClGatherNdWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010046#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010047#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010048#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010049#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000050#include "workloads/ClLogicalAndWorkload.hpp"
51#include "workloads/ClLogicalNotWorkload.hpp"
52#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010053#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000054#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010055#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010056#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000057#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010058#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000059#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010060#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010061#include "workloads/ClPadWorkload.hpp"
62#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010063#include "workloads/ClPooling2dWorkload.hpp"
Ryan OSheabab8fa92022-03-09 10:29:02 +000064#include "workloads/ClPooling3dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010065#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010066#include "workloads/ClQLstmWorkload.hpp"
67#include "workloads/ClQuantizedLstmWorkload.hpp"
68#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000069#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000070#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010071#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010072#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010073#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000074#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010075#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000076#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010077#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010078#include "workloads/ClSplitterWorkload.hpp"
Teresa Charlinaac61122022-05-05 16:11:36 +010079#include "workloads/ClSqrtWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010080#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000081#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010082#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010083#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000084#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000085#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000086#endif
87
telsoa014fcda012018-03-09 14:13:49 +000088
89namespace armnn
90{
arovir017c22c702018-10-09 11:16:46 +010091
telsoa014fcda012018-03-09 14:13:49 +000092namespace
93{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010094
telsoa014fcda012018-03-09 14:13:49 +000095template<unsigned int FilterSize>
96bool IsMatchingSize2d(const TensorInfo& weightInfo)
97{
telsoa01c577f2c2018-08-31 09:22:23 +010098 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000099 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
100}
101
// Base case: a single candidate stride; true when it equals the actual stride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}
107
// Variadic case: true when the actual stride equals any of the candidate strides.
// Implemented as a C++17 fold over the whole candidate list.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return (actualStride == FirstStride)
        || (actualStride == SecondStride)
        || (... || (actualStride == ValidStrides));
}
telsoa014fcda012018-03-09 14:13:49 +0000113
Derek Lamberti901ea112019-12-10 22:07:09 +0000114template<typename ... Args>
115bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
telsoa014fcda012018-03-09 14:13:49 +0000116{
Jan Eilers8eb25602020-03-09 12:13:48 +0000117 IgnoreUnused(reasonIfUnsupported, (args)...);
Matteo Martincighd95e9062019-01-31 15:35:59 +0000118#if defined(ARMCOMPUTECL_ENABLED)
telsoa014fcda012018-03-09 14:13:49 +0000119 return true;
120#else
arovir01085f0a42018-10-08 14:48:19 +0100121 if (reasonIfUnsupported)
telsoa014fcda012018-03-09 14:13:49 +0000122 {
arovir01085f0a42018-10-08 14:48:19 +0100123 reasonIfUnsupported.value() = "The armnn library has been built without CL support";
telsoa014fcda012018-03-09 14:13:49 +0000124 }
125 return false;
126#endif
127}
128
Matteo Martincighd95e9062019-01-31 15:35:59 +0000129#if defined(ARMCOMPUTECL_ENABLED)
telsoa014fcda012018-03-09 14:13:49 +0000130#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
131#else
132#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
133#endif
134
Matteo Martincighd95e9062019-01-31 15:35:59 +0000135#if defined(ARMCOMPUTECL_ENABLED)
telsoa014fcda012018-03-09 14:13:49 +0000136template<class FuncType, class... Args>
arovir01085f0a42018-10-08 14:48:19 +0100137inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
telsoa014fcda012018-03-09 14:13:49 +0000138{
139 arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
140 const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
141 if (!supported && reasonIfUnsupported)
142 {
arovir01085f0a42018-10-08 14:48:19 +0100143 reasonIfUnsupported.value() = aclStatus.error_description();
telsoa014fcda012018-03-09 14:13:49 +0000144 }
145 return supported;
146}
147
148#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
149 return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
150#else
151#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
Derek Lamberti901ea112019-12-10 22:07:09 +0000152 return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
telsoa014fcda012018-03-09 14:13:49 +0000153#endif
154
telsoa01c577f2c2018-08-31 09:22:23 +0100155template<typename FloatFunc, typename Uint8Func, typename ... Params>
arovir01085f0a42018-10-08 14:48:19 +0100156bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
telsoa014fcda012018-03-09 14:13:49 +0000157 DataType dataType,
telsoa01c577f2c2018-08-31 09:22:23 +0100158 FloatFunc floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000159 Uint8Func uint8FuncPtr,
160 Params&&... params)
161{
162 return IsClBackendSupported(reasonIfUnsupported) &&
163 IsSupportedForDataTypeGeneric(reasonIfUnsupported,
164 dataType,
165 floatFuncPtr,
telsoa01c577f2c2018-08-31 09:22:23 +0100166 floatFuncPtr,
telsoa014fcda012018-03-09 14:13:49 +0000167 uint8FuncPtr,
narpra01db2b1602019-01-23 15:23:11 +0000168 &FalseFunc<>,
kevmay012b4d88e2019-01-24 14:05:09 +0000169 &FalseFunc<>,
telsoa014fcda012018-03-09 14:13:49 +0000170 std::forward<Params>(params)...);
171}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100172} // anonymous namespace
173
Sadik Armagan045f6be2020-09-10 13:37:32 +0100174ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
175 : m_ModelContextPtr(modelContextPtr)
176{
177}
178
179ClLayerSupport::ClLayerSupport()
180 : m_ModelContextPtr(nullptr)
181{
182}
183
Cathal Corbett34b429c2021-12-24 12:24:40 +0000184bool ClLayerSupport::IsLayerSupported(const LayerType& type,
185 const std::vector<TensorInfo>& infos,
186 const BaseDescriptor& descriptor,
187 const Optional<LstmInputParamsInfo>& lstmParamsInfo,
188 const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
189 Optional<std::string&> reasonIfUnsupported) const
190{
191 switch (type)
192 {
193 case LayerType::Activation:
194 return IsActivationSupported(infos[0],
195 infos[1],
196 *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
197 reasonIfUnsupported);
198 case LayerType::Addition:
199 return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
200 case LayerType::ArgMinMax:
201 return IsArgMinMaxSupported(infos[0],
202 infos[1],
203 *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
204 reasonIfUnsupported);
Teresa Charlin94916a52022-10-19 08:48:07 +0100205 case LayerType::BatchMatMul:
206 return IsBatchMatMulSupported(infos[0],
207 infos[1],
208 infos[2],
209 *(PolymorphicDowncast<const BatchMatMulDescriptor*>(&descriptor)),
210 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000211 case LayerType::BatchNormalization:
212 return IsBatchNormalizationSupported(infos[0],
213 infos[1],
214 infos[2],
215 infos[3],
216 infos[4],
217 infos[5],
218 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
219 (&descriptor)),
220 reasonIfUnsupported);
221 case LayerType::BatchToSpaceNd:
222 return IsBatchToSpaceNdSupported(infos[0],
223 infos[1],
224 *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
225 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000226 case LayerType::Cast:
227 return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
228 case LayerType::ChannelShuffle:
229 return IsChannelShuffleSupported(infos[0],
230 infos[1],
231 *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
232 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000233 case LayerType::Comparison:
234 return IsComparisonSupported(infos[0],
235 infos[1],
236 infos[2],
237 *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
238 reasonIfUnsupported);
239 case LayerType::Concat:
240 {
241 std::vector<const TensorInfo*> inputInfos;
242 for (uint32_t i = 0; i < (infos.size() - 1); i++)
243 {
244 inputInfos.push_back(&infos[i]);
245 }
246 return IsConcatSupported(inputInfos,
247 infos[infos.size() - 1],
248 *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
249 reasonIfUnsupported);
250 }
251 case LayerType::Constant:
252 return IsConstantSupported(infos[0], reasonIfUnsupported);
253 case LayerType::ConvertFp16ToFp32:
254 return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
255 case LayerType::ConvertFp32ToFp16:
256 return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
257 case LayerType::Convolution2d:
258 {
259 if (infos.size() != 4)
260 {
261 throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
262 "TensorInfos should be of format: {input, output, weights, biases}.");
263 }
264
265 auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
266 if (infos[3] == TensorInfo())
267 {
268 return IsConvolution2dSupported(infos[0],
269 infos[1],
270 desc,
271 infos[2],
272 EmptyOptional(),
273 reasonIfUnsupported);
274 }
275 else
276 {
277 return IsConvolution2dSupported(infos[0],
278 infos[1],
279 desc,
280 infos[2],
281 infos[3],
282 reasonIfUnsupported);
283 }
284 }
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000285 case LayerType::Convolution3d:
286 {
287 if (infos.size() != 4)
288 {
289 throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
290 "TensorInfos should be of format: {input, output, weights, biases}.");
291 }
292
293 auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
294 if (infos[3] == TensorInfo())
295 {
296 return IsConvolution3dSupported(infos[0],
297 infos[1],
298 desc,
299 infos[2],
300 EmptyOptional(),
301 reasonIfUnsupported);
302 }
303 else
304 {
305 return IsConvolution3dSupported(infos[0],
306 infos[1],
307 desc,
308 infos[2],
309 infos[3],
310 reasonIfUnsupported);
311 }
312 }
Cathal Corbett34b429c2021-12-24 12:24:40 +0000313 case LayerType::DepthToSpace:
314 return IsDepthToSpaceSupported(infos[0],
315 infos[1],
316 *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
317 reasonIfUnsupported);
318 case LayerType::DepthwiseConvolution2d:
319 {
320 if (infos.size() != 4)
321 {
322 throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
323 "TensorInfos should be of format: {input, output, weights, biases}.");
324 }
325
326 auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
327 if (infos[3] == TensorInfo())
328 {
329 return IsDepthwiseConvolutionSupported(infos[0],
330 infos[1],
331 desc,
332 infos[2],
333 EmptyOptional(),
334 reasonIfUnsupported);
335 }
336 else
337 {
338 return IsDepthwiseConvolutionSupported(infos[0],
339 infos[1],
340 desc,
341 infos[2],
342 infos[3],
343 reasonIfUnsupported);
344 }
345 }
346 case LayerType::Dequantize:
347 return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
348 case LayerType::Division:
349 return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
350 case LayerType::ElementwiseUnary:
351 return IsElementwiseUnarySupported(infos[0],
352 infos[1],
353 *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
354 reasonIfUnsupported);
355 case LayerType::Fill:
356 return IsFillSupported(infos[0],
357 infos[1],
358 *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
359 reasonIfUnsupported);
360 case LayerType::Floor:
361 return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
362 case LayerType::FullyConnected:
363 return IsFullyConnectedSupported(infos[0],
364 infos[1],
365 infos[2],
366 infos[3],
367 *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
368 reasonIfUnsupported);
369 case LayerType::Gather:
370 return IsGatherSupported(infos[0],
371 infos[1],
372 infos[2],
373 *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
374 reasonIfUnsupported);
Teresa Charlin989e2f62022-04-27 16:26:11 +0100375 case LayerType::GatherNd:
376 return IsGatherNdSupported(infos[0],
377 infos[1],
378 infos[2],
379 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000380 case LayerType::Input:
381 return IsInputSupported(infos[0], reasonIfUnsupported);
382 case LayerType::InstanceNormalization:
383 return IsInstanceNormalizationSupported(infos[0],
384 infos[1],
385 *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
386 (&descriptor)),
387 reasonIfUnsupported);
388 case LayerType::L2Normalization:
389 return IsL2NormalizationSupported(infos[0],
390 infos[1],
391 *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
392 reasonIfUnsupported);
393 case LayerType::LogicalBinary:
394 return IsLogicalBinarySupported(infos[0],
395 infos[1],
396 infos[2],
397 *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
398 reasonIfUnsupported);
399 case LayerType::LogSoftmax:
400 return IsLogSoftmaxSupported(infos[0],
401 infos[1],
402 *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
403 reasonIfUnsupported);
404 case LayerType::Lstm:
405 return IsLstmSupported(infos[0],
406 infos[1],
407 infos[2],
408 infos[3],
409 infos[4],
410 infos[5],
411 infos[6],
412 *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
413 lstmParamsInfo.value(),
414 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000415 case LayerType::Map:
416 return true;
417 case LayerType::MemCopy:
418 return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
419 case LayerType::MemImport:
420 return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
421 case LayerType::Merge:
422 return LayerSupportBase::IsMergeSupported(infos[0],
423 infos[1],
424 infos[2],
425 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000426 case LayerType::Maximum:
427 return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
428 case LayerType::Mean:
429 return IsMeanSupported(infos[0],
430 infos[1],
431 *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
432 reasonIfUnsupported);
433 case LayerType::Minimum:
434 return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
435 case LayerType::Multiplication:
436 return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
437 case LayerType::Normalization:
438 return IsNormalizationSupported(infos[0],
439 infos[1],
440 *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
441 reasonIfUnsupported);
442 case LayerType::Output:
443 return IsOutputSupported(infos[0], reasonIfUnsupported);
444 case LayerType::Pad:
445 return IsPadSupported(infos[0],
446 infos[1],
447 *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
448 reasonIfUnsupported);
449 case LayerType::Permute:
450 return IsPermuteSupported(infos[0],
451 infos[1],
452 *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
453 reasonIfUnsupported);
454 case LayerType::Pooling2d:
455 return IsPooling2dSupported(infos[0],
456 infos[1],
457 *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
458 reasonIfUnsupported);
Ryan OSheabab8fa92022-03-09 10:29:02 +0000459 case LayerType::Pooling3d:
460 return IsPooling3dSupported(infos[0],
461 infos[1],
462 *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
463 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000464 case LayerType::Prelu:
465 return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000466 case LayerType::QLstm:
467 return IsQLstmSupported(infos[0],
468 infos[1],
469 infos[2],
470 infos[3],
471 infos[4],
472 infos[5],
473 *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
474 lstmParamsInfo.value(),
475 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000476 case LayerType::Quantize:
477 return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
478 case LayerType::QuantizedLstm:
479 return IsQuantizedLstmSupported(infos[0],
480 infos[1],
481 infos[2],
482 infos[3],
483 infos[4],
484 quantizedLstmParamsInfo.value(),
485 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000486 case LayerType::Rank:
487 return true;
488 case LayerType::Reduce:
489 return IsReduceSupported(infos[0],
490 infos[1],
491 *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
492 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000493 case LayerType::Reshape:
494 return IsReshapeSupported(infos[0],
495 infos[1],
496 *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
497 reasonIfUnsupported);
498 case LayerType::Resize:
499 return IsResizeSupported(infos[0],
500 infos[1],
501 *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
502 reasonIfUnsupported);
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000503 case LayerType::Shape:
504 return LayerSupportBase::IsShapeSupported(infos[0],
505 infos[1],
506 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000507 case LayerType::Slice:
508 return IsSliceSupported(infos[0],
509 infos[1],
510 *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
511 reasonIfUnsupported);
512 case LayerType::Softmax:
513 return IsSoftmaxSupported(infos[0],
514 infos[1],
515 *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
516 reasonIfUnsupported);
517 case LayerType::SpaceToBatchNd:
518 return IsSpaceToBatchNdSupported(infos[0],
519 infos[1],
520 *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
521 reasonIfUnsupported);
522 case LayerType::SpaceToDepth:
523 return IsSpaceToDepthSupported(infos[0],
524 infos[1],
525 *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
526 reasonIfUnsupported);
527 case LayerType::Splitter:
528 {
529 std::vector<TensorInfo> outputInfos;
530 for (uint32_t i = 1; i < infos.size(); i++)
531 {
532 outputInfos.push_back(infos[i]);
533 }
534 return IsSplitterSupported(infos[0],
535 {outputInfos.begin(), outputInfos.end()},
536 *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
537 reasonIfUnsupported);
538 }
539 case LayerType::Stack:
540 {
541 std::vector<const TensorInfo*> inputInfos;
542 for (uint32_t i = 0; i < infos.size() - 1; i++)
543 {
544 inputInfos.push_back(&infos[i]);
545 }
546 return IsStackSupported(inputInfos,
547 infos[infos.size() - 1],
548 *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
549 reasonIfUnsupported);
550 }
551 case LayerType::StridedSlice:
552 return IsStridedSliceSupported(infos[0],
553 infos[1],
554 *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
555 reasonIfUnsupported);
556 case LayerType::Subtraction:
557 return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
558 case LayerType::Transpose:
559 return IsTransposeSupported(infos[0],
560 infos[1],
561 *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
562 reasonIfUnsupported);
563 case LayerType::TransposeConvolution2d:
564 {
565 if (infos.size() != 4)
566 {
567 throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
568 "TensorInfos should be of format: {input, output, weights, biases}.");
569 }
570
571 auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
572 if (infos[3] == TensorInfo())
573 {
574 return IsTransposeConvolution2dSupported(infos[0],
575 infos[1],
576 desc,
577 infos[2],
578 EmptyOptional(),
579 reasonIfUnsupported);
580 }
581 else
582 {
583 return IsTransposeConvolution2dSupported(infos[0],
584 infos[1],
585 desc,
586 infos[2],
587 infos[3],
588 reasonIfUnsupported);
589 }
590 }
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000591 case LayerType::UnidirectionalSequenceLstm:
592 return IsUnidirectionalSequenceLstmSupported(infos[0],
593 infos[1],
594 infos[2],
595 infos[3],
596 infos[4],
597 infos[5],
598 *(PolymorphicDowncast<const
599 UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
600 lstmParamsInfo.value(),
601 reasonIfUnsupported);
Cathal Corbett34b429c2021-12-24 12:24:40 +0000602 case LayerType::Unmap:
603 return true;
Cathal Corbett34b429c2021-12-24 12:24:40 +0000604 default:
605 // layers not supported in cl by default:
Cathal Corbett4952a3e2022-03-03 15:14:18 +0000606 // debug, detectionpostprocess, fakequantization,
607 // precompiled, standin, switch, pooling3d
Cathal Corbett34b429c2021-12-24 12:24:40 +0000608 return false;
609 }
610}
611
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100612bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
613 const TensorInfo& output,
614 const ActivationDescriptor& descriptor,
615 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000616{
telsoa01c577f2c2018-08-31 09:22:23 +0100617 FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
618 reasonIfUnsupported,
619 input,
620 output,
621 descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000622}
623
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100624bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
625 const TensorInfo& input1,
626 const TensorInfo& output,
627 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000628{
arovir01085f0a42018-10-08 14:48:19 +0100629 FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
630 reasonIfUnsupported,
631 input0,
632 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +0000633 output,
634 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000635}
636
James Conroy2dc05722019-09-19 17:00:31 +0100637bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
638 const TensorInfo& output,
639 const ArgMinMaxDescriptor& descriptor,
640 Optional<std::string&> reasonIfUnsupported) const
641{
Francis Murtagh52ec3462019-11-19 12:24:19 +0000642
James Conroy2dc05722019-09-19 17:00:31 +0100643 FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
644 reasonIfUnsupported,
645 input,
646 output,
647 descriptor);
648}
649
Teresa Charlin94916a52022-10-19 08:48:07 +0100650bool ClLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX,
651 const TensorInfo& inputY,
652 const TensorInfo& output,
653 const BatchMatMulDescriptor& descriptor,
654 Optional<std::string&> reasonIfUnsupported) const
655{
656 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchMatMulValidate,
657 reasonIfUnsupported,
658 inputX,
659 inputY,
660 output,
661 descriptor);
662}
663
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100664bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
665 const TensorInfo& output,
666 const TensorInfo& mean,
667 const TensorInfo& var,
668 const TensorInfo& beta,
669 const TensorInfo& gamma,
670 const BatchNormalizationDescriptor& descriptor,
671 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000672{
telsoa01c577f2c2018-08-31 09:22:23 +0100673 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
674 reasonIfUnsupported,
675 input,
676 output,
677 mean,
678 var,
679 beta,
680 gamma,
Mike Kelly07810fc2020-11-12 10:58:48 +0000681 descriptor,
682 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000683}
684
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100685bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
686 const TensorInfo& output,
687 const BatchToSpaceNdDescriptor& descriptor,
688 Optional<std::string&> reasonIfUnsupported) const
689{
690 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
691 reasonIfUnsupported,
692 input,
693 output,
694 descriptor);
695}
696
Sadik Armaganf40d6d42021-04-22 09:12:11 +0100697bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
698 const TensorInfo& output,
699 Optional<std::string&> reasonIfUnsupported) const
700{
701 FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
702 reasonIfUnsupported,
703 input,
704 output);
705}
706
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100707bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
Mike Kelly831faed2018-11-28 11:52:08 +0000708 const TensorInfo& output,
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100709 const ChannelShuffleDescriptor& descriptor,
Mike Kelly831faed2018-11-28 11:52:08 +0000710 Optional<std::string&> reasonIfUnsupported) const
711{
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100712 FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
Mike Kelly831faed2018-11-28 11:52:08 +0000713 reasonIfUnsupported,
714 input,
715 output,
716 descriptor);
717}
718
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100719bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
720 const TensorInfo& input1,
721 const TensorInfo& output,
722 const ComparisonDescriptor& descriptor,
723 Optional<std::string&> reasonIfUnsupported) const
724{
Teresa Charlin2b030d92020-03-27 16:40:56 +0000725 FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
726 reasonIfUnsupported,
727 input0,
728 input1,
729 output,
730 descriptor);
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100731}
732
// Queries support for a Concat layer.
// Support depends on which axis is concatenated over:
//   - inner axes (width/height/channels): delegated to ClConcatWorkloadValidate;
//   - the batch axis of a 4D tensor: handled via the sub-tensor optimization, which
//     requires all inputs to share type/quantization space with the output;
//   - anything beyond 4 dimensions: unsupported.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // The concat axis must index an existing dimension.
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Distance of the concat axis from the innermost dimension (0 = innermost).
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        // Macro expands to a return of the validation result.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
773
// Queries support for a Constant layer (only the produced tensor matters) by
// forwarding to ClConstantWorkloadValidate.
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
781
// Queries support for an FP16 -> FP32 conversion layer by forwarding to
// ClConvertFp16ToFp32WorkloadValidate.
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
791
// Queries support for an FP32 -> FP16 conversion layer by forwarding to
// ClConvertFp32ToFp16WorkloadValidate.
bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
801
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100802bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
803 const TensorInfo& output,
804 const Convolution2dDescriptor& descriptor,
805 const TensorInfo& weights,
806 const Optional<TensorInfo>& biases,
807 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000808{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100809 bool isFastMathEnabled = false;
810#if defined(ARMCOMPUTECL_ENABLED)
811 if (m_ModelContextPtr)
812 {
813 if (m_ModelContextPtr.get() != nullptr)
814 {
815 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
816 if (modelOptions)
817 {
818 isFastMathEnabled = modelOptions->IsFastMathEnabled();
819 }
820 }
821 }
822#endif
823
surmeh013537c2c2018-05-18 16:31:43 +0100824 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
825 reasonIfUnsupported,
826 input,
827 output,
828 descriptor,
829 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100830 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000831 isFastMathEnabled,
832 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000833}
834
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100835bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
836 const TensorInfo& output,
837 const Convolution3dDescriptor& descriptor,
838 const TensorInfo& weights,
839 const Optional<TensorInfo>& biases,
840 Optional<std::string&> reasonIfUnsupported) const
841{
842 bool isFastMathEnabled = false;
843#if defined(ARMCOMPUTECL_ENABLED)
844 if (m_ModelContextPtr)
845{
846 if (m_ModelContextPtr.get() != nullptr)
847 {
848 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
849 if (modelOptions)
850 {
851 isFastMathEnabled = modelOptions->IsFastMathEnabled();
852 }
853 }
854}
855#endif
856
857 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
858 reasonIfUnsupported,
859 input,
860 output,
861 descriptor,
862 weights,
863 biases,
864 isFastMathEnabled,
865 nullptr);
866}
867
// Queries support for a Dequantize layer by forwarding to ClDequantizeWorkloadValidate.
bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
877
// Queries support for a DepthToSpace layer by forwarding to
// ClDepthToSpaceWorkloadValidate.
bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
889
// Queries support for a DepthwiseConvolution2d layer by forwarding to
// ClDepthwiseConvolutionWorkloadValidate. The trailing nullptr is the (absent)
// activation descriptor used by fused-activation validation.
bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
906
// Queries support for a dilated DepthwiseConvolution2d layer. Dilation is carried
// in the descriptor, so this delegates to the same ClDepthwiseConvolutionWorkloadValidate
// as the non-dilated variant above.
bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
923
924
// Queries support for an elementwise Division layer by forwarding to
// ClDivisionWorkloadValidate (trailing nullptr = no fused activation).
bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
937
// Queries support for an ElementwiseUnary layer, dispatching on the unary operation
// to the corresponding Cl*WorkloadValidate.
// Note: the case labels carry no break statements because FORWARD_WORKLOAD_VALIDATE_FUNC
// expands to a return (the function would otherwise fall off the end without a value);
// unhandled operations are rejected via the default branch.
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}
989
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100990bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
991 const TensorInfo& output,
992 const FillDescriptor& descriptor,
993 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +0100994{
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100995 armnn::IgnoreUnused(input);
996 armnn::IgnoreUnused(output);
997 armnn::IgnoreUnused(descriptor);
998
999 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +01001000}
1001
// Queries support for a Floor layer by forwarding to ClFloorWorkloadValidate.
bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
1011
// Queries support for a FullyConnected layer by forwarding to
// ClFullyConnectedWorkloadValidate (trailing nullptr = no fused activation).
bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}
1028
// Queries support for a Gather layer (input0 = data, input1 = indices) by
// forwarding to ClGatherWorkloadValidate.
bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
1042
// Queries support for a GatherNd layer (input0 = data, input1 = indices) by
// forwarding to ClGatherNdWorkloadValidate.
bool ClLayerSupport::IsGatherNdSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1054
// Input layers are supported whenever the CL backend is available; no
// workload-level validation is needed.
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}
1060
// Queries support for an InstanceNormalization layer by forwarding to
// ClInstanceNormalizationWorkloadValidate.
bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1072
// Queries support for an L2Normalization layer by forwarding to
// ClL2NormalizationWorkloadValidate.
bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1084
// Queries support for a LogicalBinary layer (And/Or) by dispatching on the
// operation to the corresponding Cl*WorkloadValidate.
// No break after each case: FORWARD_WORKLOAD_VALIDATE_FUNC expands to a return.
// The IgnoreUnused(output) silences unused-parameter warnings in builds where the
// macro's CL path is compiled out.
bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}
1111
1112
// Queries support for a LogSoftmax layer by forwarding to ClLogSoftmaxWorkloadValidate.
bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1124
// Queries support for an LSTM layer (float path) by forwarding all state,
// scratch and parameter tensor infos to ClLstmFloatWorkloadValidate.
bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1148
// Queries support for an elementwise Maximum layer by forwarding to
// ClMaximumWorkloadValidate.
bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1160
// Queries support for a Mean (reduction) layer by forwarding to ClMeanValidate.
bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1172
// Queries support for an elementwise Minimum layer by forwarding to
// ClMinimumWorkloadValidate.
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1184
// Queries support for an elementwise Multiplication layer by forwarding to
// ClMultiplicationWorkloadValidate (trailing nullptr = no fused activation).
bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1197
// Queries support for a Normalization (LRN) layer by forwarding to
// ClNormalizationWorkloadValidate.
bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1205
// Output layers are supported whenever the CL backend is available; no
// workload-level validation is needed.
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}
1211
// Queries support for a Pad layer by forwarding to ClPadValidate.
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1223
// Queries support for a Permute layer by forwarding to ClPermuteWorkloadValidate.
bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001231
// Queries support for a Pooling2d layer by forwarding to ClPooling2dWorkloadValidate.
bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1239
// Queries support for a Pooling3d layer by forwarding to ClPooling3dWorkloadValidate.
bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling3dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1247
// Queries support for a PReLU layer (input plus per-element alpha tensor) by
// forwarding to ClPreluWorkloadValidate.
bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}
1255
// Queries support for a QLstm (quantized LSTM) layer.
// Only the QAsymmS8 activation / QSymmS16 cell-state type combination is accepted;
// any other combination is rejected outright without consulting the workload.
// NOTE: the argument order passed to ClQLstmWorkloadValidate intentionally differs
// from this function's parameter order (cell-state before output-state) to match
// the validate function's signature.
bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    if (input.GetDataType()             == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType()  == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType()    == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType()      == armnn::DataType::QSymmS16 &&
        output.GetDataType()            == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}
1289
// Queries support for a QuantizedLstm layer by forwarding to
// ClQuantizedLstmWorkloadValidate.
bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}
1307
// Queries support for a Quantize layer by forwarding to ClQuantizeWorkloadValidate.
bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
1317
// Queries support for a Reduce layer by forwarding to ClReduceWorkloadValidate.
bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1329
// Queries support for a Reshape layer. The descriptor (target shape) is not
// needed by the CL validation — only input/output infos are checked.
bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}
1338
// Queries support for a Resize layer by forwarding to ClResizeWorkloadValidate.
bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1346
// Queries support for a Slice layer by forwarding to ClSliceWorkloadValidate.
bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1354
// Queries support for a Softmax layer by forwarding to ClSoftmaxWorkloadValidate.
bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1362
// Queries support for a SpaceToBatchNd layer by forwarding to
// ClSpaceToBatchNdWorkloadValidate.
bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1374
// Queries support for a SpaceToDepth layer by forwarding to
// ClSpaceToDepthWorkloadValidate.
bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1386
// Queries support for a Splitter layer.
// When CL is compiled in and the split is over the innermost (last) dimension of a
// >2D tensor, sub-tensors cannot be used, so the dedicated CL splitter workload is
// validated instead. All other cases rely on the sub-tensor path, which only
// requires every output to share type/quantization space with the input.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // Macro expands to a return of the validation result.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Silences unused-parameter warnings when ARMCOMPUTECL_ENABLED is not defined.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
1419
// Queries support for a Stack layer by forwarding to ClStackWorkloadValidate.
bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}
1431
// Queries support for a StridedSlice layer by forwarding to
// ClStridedSliceWorkloadValidate.
bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1443
// Queries support for an elementwise Subtraction layer by forwarding to
// ClSubtractionValidate (trailing nullptr = no fused activation).
bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1456
// Queries support for a TransposeConvolution2d (deconvolution) layer by
// forwarding to ClTransposeConvolution2dWorkloadValidate.
bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
1472
// Queries support for a Transpose layer by forwarding to ClTransposeWorkloadValidate.
bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1480
// Queries support for a UnidirectionalSequenceLstm layer (float path) by
// forwarding to ClUnidirectionalSequenceLstmFloatWorkloadValidate.
bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                           const TensorInfo& outputStateIn,
                                                           const TensorInfo& cellStateIn,
                                                           const TensorInfo& outputStateOut,
                                                           const TensorInfo& cellStateOut,
                                                           const TensorInfo& output,
                                                           const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                           const LstmInputParamsInfo& paramsInfo,
                                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1502
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001503} // namespace armnn