blob: 4dcaca957687c453997940ada2ce8d4ec657bbf4 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Teresa Charlin8398edc2020-07-20 14:23:02 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010025#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000026#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010027#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010028#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000029#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010030#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010031#include "workloads/ClConvertFp16ToFp32Workload.hpp"
32#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010033#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010034#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010035#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010036#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010037#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010038#include "workloads/ClDivisionWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010039#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010040#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000041#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010042#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010043#include "workloads/ClGatherWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010044#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010045#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010046#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010047#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000048#include "workloads/ClLogicalAndWorkload.hpp"
49#include "workloads/ClLogicalNotWorkload.hpp"
50#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010051#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000052#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010053#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010054#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000055#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010056#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000057#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010058#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010059#include "workloads/ClPadWorkload.hpp"
60#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010061#include "workloads/ClPooling2dWorkload.hpp"
Ryan OSheabab8fa92022-03-09 10:29:02 +000062#include "workloads/ClPooling3dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010063#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010064#include "workloads/ClQLstmWorkload.hpp"
65#include "workloads/ClQuantizedLstmWorkload.hpp"
66#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000067#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000068#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010069#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010070#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010071#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000072#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010073#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000074#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010075#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010076#include "workloads/ClSplitterWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010077#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000078#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010079#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010080#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000081#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000082#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000083#endif
84
telsoa014fcda012018-03-09 14:13:49 +000085
86namespace armnn
87{
arovir017c22c702018-10-09 11:16:46 +010088
telsoa014fcda012018-03-09 14:13:49 +000089namespace
90{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010091
telsoa014fcda012018-03-09 14:13:49 +000092template<unsigned int FilterSize>
93bool IsMatchingSize2d(const TensorInfo& weightInfo)
94{
telsoa01c577f2c2018-08-31 09:22:23 +010095 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000096 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
97}
98
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    // Base case of the variadic stride check: one candidate stride left.
    return actualStride == ValidStride;
}
104
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    // Recursive case: accept as soon as any candidate matches, otherwise
    // keep testing the remaining candidate strides.
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +0000110
// Returns true when armnn was built with Arm Compute CL support
// (ARMCOMPUTECL_ENABLED); otherwise returns false and, when the caller asked
// for a reason, reports that CL support was not compiled in. The variadic
// arguments exist only so callers can forward their parameters unchanged —
// they are never inspected.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
125
// When CL is compiled in, evaluate the given support expression directly;
// otherwise collapse to the generic "CL not built" check above (note the
// fallback expects a 'reasonIfUnsupported' variable in the calling scope).
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
131
#if defined(ARMCOMPUTECL_ENABLED)
// Runs an Arm Compute validation function and converts its
// arm_compute::Status into a bool, copying the error description into
// reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: both variants of this macro expand to a 'return' statement, so the
// enclosing function returns at the point of the macro invocation.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
151
// Combines the build-time CL availability check with a per-data-type
// dispatch: the same function pointer is registered for both float types,
// uint8 has its own handler, and the remaining two data-type slots are
// unconditionally rejected via FalseFunc. (The exact slot-to-data-type
// mapping is defined by IsSupportedForDataTypeGeneric in
// LayerSupportCommon.hpp — confirm there before reordering arguments.)
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100169} // anonymous namespace
170
// Constructs a ClLayerSupport that consults the given backend-specific model
// context when answering layer-support queries (e.g. the FastMath option is
// read from it in IsConvolution2dSupported).
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}
175
// Constructs a ClLayerSupport with no model context; context-dependent
// options (e.g. FastMath) then fall back to their defaults.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
180
// Central layer-support dispatcher for the CL (GPU) backend.
//
// 'infos' carries the layer's input and output TensorInfos in a per-layer
// convention: for most layers it is {inputs..., output}; for the
// convolution-style layers it must be exactly {input, output, weights,
// biases} (a default-constructed TensorInfo in slot 3 means "no bias").
// 'descriptor' is downcast to the layer-specific descriptor type, so the
// caller must pass the descriptor matching 'type'. lstmParamsInfo /
// quantizedLstmParamsInfo must hold values for the LSTM-family cases that
// dereference them with .value().
bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchNormalization:
            // infos = {input, output, mean, var, beta, gamma}
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            // All entries except the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertBf16ToFp32:
            // No CL implementation; defer to the base-class (reference) answer.
            return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        case LayerType::ConvertFp32ToBf16:
            return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the bias slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            // Requires lstmParamsInfo to hold a value.
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Multiplication:
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Pooling3d:
            return IsPooling3dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling3dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            // Requires lstmParamsInfo to hold a value.
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            // Requires quantizedLstmParamsInfo to hold a value.
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // infos = {input, outputs...}
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            // infos = {inputs..., output}
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            // Requires lstmParamsInfo to hold a value.
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                             UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // Layers not supported in the CL backend by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch
            // (Pooling3d is handled above; it was removed from this list.)
            return false;
    }
}
605
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100606bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
607 const TensorInfo& output,
608 const ActivationDescriptor& descriptor,
609 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000610{
telsoa01c577f2c2018-08-31 09:22:23 +0100611 FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
612 reasonIfUnsupported,
613 input,
614 output,
615 descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000616}
617
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100618bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
619 const TensorInfo& input1,
620 const TensorInfo& output,
621 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000622{
arovir01085f0a42018-10-08 14:48:19 +0100623 FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
624 reasonIfUnsupported,
625 input0,
626 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +0000627 output,
628 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000629}
630
James Conroy2dc05722019-09-19 17:00:31 +0100631bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
632 const TensorInfo& output,
633 const ArgMinMaxDescriptor& descriptor,
634 Optional<std::string&> reasonIfUnsupported) const
635{
Francis Murtagh52ec3462019-11-19 12:24:19 +0000636
James Conroy2dc05722019-09-19 17:00:31 +0100637 FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
638 reasonIfUnsupported,
639 input,
640 output,
641 descriptor);
642}
643
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100644bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
645 const TensorInfo& output,
646 const TensorInfo& mean,
647 const TensorInfo& var,
648 const TensorInfo& beta,
649 const TensorInfo& gamma,
650 const BatchNormalizationDescriptor& descriptor,
651 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000652{
telsoa01c577f2c2018-08-31 09:22:23 +0100653 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
654 reasonIfUnsupported,
655 input,
656 output,
657 mean,
658 var,
659 beta,
660 gamma,
Mike Kelly07810fc2020-11-12 10:58:48 +0000661 descriptor,
662 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000663}
664
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100665bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
666 const TensorInfo& output,
667 const BatchToSpaceNdDescriptor& descriptor,
668 Optional<std::string&> reasonIfUnsupported) const
669{
670 FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
671 reasonIfUnsupported,
672 input,
673 output,
674 descriptor);
675}
676
Sadik Armaganf40d6d42021-04-22 09:12:11 +0100677bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
678 const TensorInfo& output,
679 Optional<std::string&> reasonIfUnsupported) const
680{
681 FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
682 reasonIfUnsupported,
683 input,
684 output);
685}
686
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100687bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
Mike Kelly831faed2018-11-28 11:52:08 +0000688 const TensorInfo& output,
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100689 const ChannelShuffleDescriptor& descriptor,
Mike Kelly831faed2018-11-28 11:52:08 +0000690 Optional<std::string&> reasonIfUnsupported) const
691{
Teresa Charlin1222dbd2021-09-02 13:58:52 +0100692 FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
Mike Kelly831faed2018-11-28 11:52:08 +0000693 reasonIfUnsupported,
694 input,
695 output,
696 descriptor);
697}
698
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100699bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
700 const TensorInfo& input1,
701 const TensorInfo& output,
702 const ComparisonDescriptor& descriptor,
703 Optional<std::string&> reasonIfUnsupported) const
704{
Teresa Charlin2b030d92020-03-27 16:40:56 +0000705 FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
706 reasonIfUnsupported,
707 input0,
708 input1,
709 output,
710 descriptor);
Aron Virginas-Tar77bfb5e2019-10-16 17:45:38 +0100711}
712
// Decides whether CL can concatenate 'inputs' into 'output' along the axis in
// 'descriptor'. Concatenation along width/height/channels is delegated to the
// Arm Compute validator; concatenation along the outermost (batch) dimension
// of 4D tensors is handled via the sub-tensor optimization instead, which
// requires all inputs to share type space with the output.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    // Reject out-of-range concat axes up front.
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Distance of the concat axis from the innermost dimension:
    // 0 = width, 1 = height, 2 = channels, 3 = batch (for a 4D tensor).
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        // The macro expands to a 'return' statement.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
753
// Support check for Constant layers: delegates to ClConstantWorkloadValidate.
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
761
// Support check for ConvertFp16ToFp32: delegates to ClConvertFp16ToFp32WorkloadValidate.
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
771
// Support check for ConvertFp32ToFp16: delegates to ClConvertFp32ToFp16WorkloadValidate.
bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
781
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100782bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
783 const TensorInfo& output,
784 const Convolution2dDescriptor& descriptor,
785 const TensorInfo& weights,
786 const Optional<TensorInfo>& biases,
787 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000788{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100789 bool isFastMathEnabled = false;
790#if defined(ARMCOMPUTECL_ENABLED)
791 if (m_ModelContextPtr)
792 {
793 if (m_ModelContextPtr.get() != nullptr)
794 {
795 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
796 if (modelOptions)
797 {
798 isFastMathEnabled = modelOptions->IsFastMathEnabled();
799 }
800 }
801 }
802#endif
803
surmeh013537c2c2018-05-18 16:31:43 +0100804 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
805 reasonIfUnsupported,
806 input,
807 output,
808 descriptor,
809 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100810 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000811 isFastMathEnabled,
812 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000813}
814
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100815bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
816 const TensorInfo& output,
817 const Convolution3dDescriptor& descriptor,
818 const TensorInfo& weights,
819 const Optional<TensorInfo>& biases,
820 Optional<std::string&> reasonIfUnsupported) const
821{
822 bool isFastMathEnabled = false;
823#if defined(ARMCOMPUTECL_ENABLED)
824 if (m_ModelContextPtr)
825{
826 if (m_ModelContextPtr.get() != nullptr)
827 {
828 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
829 if (modelOptions)
830 {
831 isFastMathEnabled = modelOptions->IsFastMathEnabled();
832 }
833 }
834}
835#endif
836
837 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
838 reasonIfUnsupported,
839 input,
840 output,
841 descriptor,
842 weights,
843 biases,
844 isFastMathEnabled,
845 nullptr);
846}
847
// Support check for Dequantize: delegates to ClDequantizeWorkloadValidate.
bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
857
// Support check for DepthToSpace: delegates to ClDepthToSpaceWorkloadValidate.
bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
869
// Support check for DepthwiseConvolution2d: delegates to
// ClDepthwiseConvolutionWorkloadValidate.
bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    // Trailing nullptr — NOTE(review): presumably the optional fused-activation
    // descriptor; confirm against ClDepthwiseConvolutionWorkloadValidate.
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
886
// Support check for dilated DepthwiseConvolution2d. Uses the same validate
// function as the non-dilated case (dilation is carried in the descriptor).
bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}
903
904
// Support check for Division: delegates to ClDivisionWorkloadValidate.
bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    // Trailing nullptr — NOTE(review): presumably the optional fused-activation
    // descriptor; confirm against ClDivisionWorkloadValidate.
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
917
// Support check for ElementwiseUnary: dispatches on the unary operation and
// delegates to the matching Cl*WorkloadValidate function.
// No break statements are needed: FORWARD_WORKLOAD_VALIDATE_FUNC returns from
// the enclosing function (it is the sole statement of many Is*Supported bodies).
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        // Any other unary operation is unsupported on CL.
        default:
            return false;
    }
}
964
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100965bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
966 const TensorInfo& output,
967 const FillDescriptor& descriptor,
968 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +0100969{
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100970 armnn::IgnoreUnused(input);
971 armnn::IgnoreUnused(output);
972 armnn::IgnoreUnused(descriptor);
973
974 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +0100975}
976
// Support check for Floor: delegates to ClFloorWorkloadValidate.
bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
986
// Support check for FullyConnected: delegates to ClFullyConnectedWorkloadValidate.
bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    // Trailing nullptr — NOTE(review): presumably the optional fused-activation
    // descriptor; confirm against ClFullyConnectedWorkloadValidate.
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}
1003
// Support check for Gather (input0 = params, input1 = indices): delegates to
// ClGatherWorkloadValidate.
bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
1017
// Input layers are supported whenever the CL backend itself is available.
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}
1023
// Support check for InstanceNormalization: delegates to
// ClInstanceNormalizationWorkloadValidate.
bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1035
// Support check for L2Normalization: delegates to ClL2NormalizationWorkloadValidate.
bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1047
James Conroyfe3ec942020-11-18 14:20:53 +00001048bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
1049 const TensorInfo& input1,
1050 const TensorInfo& output,
1051 const LogicalBinaryDescriptor& descriptor,
1052 Optional<std::string&> reasonIfUnsupported) const
1053{
1054 IgnoreUnused(output);
1055
1056 switch(descriptor.m_Operation)
1057 {
1058 case LogicalBinaryOperation::LogicalAnd:
1059 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
1060 reasonIfUnsupported,
1061 input0,
1062 input1,
1063 output);
1064 case LogicalBinaryOperation::LogicalOr:
1065 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
1066 reasonIfUnsupported,
1067 input0,
1068 input1,
1069 output);
1070 default:
1071 return false;
1072 }
1073}
1074
1075
// Support check for LogSoftmax: delegates to ClLogSoftmaxWorkloadValidate.
bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1087
// Support check for Lstm: delegates to ClLstmFloatWorkloadValidate with the
// full set of state/scratch tensors and the packed weights (paramsInfo).
bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}
1111
// Support check for Maximum: delegates to ClMaximumWorkloadValidate.
bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1123
// Support check for Mean: delegates to ClMeanValidate.
bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1135
// Support check for Minimum: delegates to ClMinimumWorkloadValidate.
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
1147
// Support check for Multiplication: delegates to ClMultiplicationWorkloadValidate.
bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    // Trailing nullptr — NOTE(review): presumably the optional fused-activation
    // descriptor; confirm against ClMultiplicationWorkloadValidate.
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1160
// Support check for Normalization: delegates to ClNormalizationWorkloadValidate.
bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1168
// Output layers are supported whenever the CL backend itself is available.
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}
1174
// Support check for Pad: delegates to ClPadValidate.
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1186
// Support check for Permute: delegates to ClPermuteWorkloadValidate.
bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001194
// Support check for Pooling2d: delegates to ClPooling2dWorkloadValidate.
bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1202
// Support check for Pooling3d: delegates to ClPooling3dWorkloadValidate.
bool ClLayerSupport::IsPooling3dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling3dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling3dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1210
// Support check for Prelu (input + per-channel/broadcastable alpha):
// delegates to ClPreluWorkloadValidate.
bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}
1218
Ryan OShea2323af42020-05-13 16:36:19 +01001219bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
1220 const TensorInfo& previousOutputIn,
1221 const TensorInfo& previousCellStateIn,
1222 const TensorInfo& outputStateOut,
1223 const TensorInfo& cellStateOut,
1224 const TensorInfo& output,
1225 const QLstmDescriptor& descriptor,
1226 const LstmInputParamsInfo& paramsInfo,
1227 Optional<std::string&> reasonIfUnsupported) const
1228{
1229 if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1230 previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1231 previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1232 outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1233 cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1234 output.GetDataType() == armnn::DataType::QAsymmS8)
1235 {
1236 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
1237 reasonIfUnsupported,
1238 input,
1239 previousCellStateIn,
1240 previousOutputIn,
1241 cellStateOut,
1242 outputStateOut,
1243 output,
1244 descriptor,
1245 paramsInfo);
1246 }
1247 else
1248 {
1249 return false;
1250 }
1251}
1252
// Support check for QuantizedLstm: delegates to ClQuantizedLstmWorkloadValidate.
bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}
1270
// Support check for Quantize: delegates to ClQuantizeWorkloadValidate.
bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
1280
// Support check for Reduce: delegates to ClReduceWorkloadValidate.
bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1292
// Support check for Reshape. The descriptor (target shape) is not needed for
// validation — only the input/output infos are passed to the validate function.
bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}
1301
// Support check for Resize: delegates to ClResizeWorkloadValidate.
bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1309
// Support check for Slice: delegates to ClSliceWorkloadValidate.
bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1317
// Support check for Softmax: delegates to ClSoftmaxWorkloadValidate.
bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1325
// Support check for SpaceToBatchNd: delegates to ClSpaceToBatchNdWorkloadValidate.
bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1337
// Support check for SpaceToDepth: delegates to ClSpaceToDepthWorkloadValidate.
bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1349
// Support check for Splitter.
// A split along the last (innermost) dimension cannot use sub-tensors (their
// width/height would not match the parent for >2D inputs), so that case is
// validated by the ACL workload. All other splits rely on sub-tensors, which
// only requires the output types/quantization to match the input.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // Returns from within the macro when this branch is taken.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Needed for the non-ACL build, where the block above is compiled out and
    // descriptor would otherwise be unused.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
1382
// Support check for Stack: delegates to ClStackWorkloadValidate.
bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}
1394
// Support check for StridedSlice: delegates to ClStridedSliceWorkloadValidate.
bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
1406
// Support check for Subtraction: delegates to ClSubtractionValidate.
bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    // Trailing nullptr — NOTE(review): presumably the optional fused-activation
    // descriptor; confirm against ClSubtractionValidate.
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
1419
// Support check for TransposeConvolution2d: delegates to
// ClTransposeConvolution2dWorkloadValidate.
bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
1435
// Support check for Transpose: delegates to ClTransposeWorkloadValidate.
bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
1443
// Support check for UnidirectionalSequenceLstm: delegates to
// ClUnidirectionalSequenceLstmFloatWorkloadValidate.
bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                           const TensorInfo& outputStateIn,
                                                           const TensorInfo& cellStateIn,
                                                           const TensorInfo& output,
                                                           const Optional<TensorInfo>& hiddenStateOutput,
                                                           const Optional<TensorInfo>& cellStateOutput,
                                                           const UnidirectionalSequenceLstmDescriptor& descriptor,
                                                           const LstmInputParamsInfo& paramsInfo,
                                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   output,
                                   hiddenStateOutput,
                                   cellStateOutput,
                                   descriptor,
                                   paramsInfo);
}
1465
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001466} // namespace armnn