blob: e52f578bc035d96e4cc381aefcb0f5fcf19c1a66 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Teresa Charlin8398edc2020-07-20 14:23:02 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
Sadik Armagan045f6be2020-09-10 13:37:32 +01008#include "ClBackendModelContext.hpp"
arovir017c22c702018-10-09 11:16:46 +01009
Matteo Martincighc601aa62019-10-29 15:03:22 +000010#include <armnn/BackendRegistry.hpp>
11
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000012#include <InternalTypes.hpp>
13#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000014
Sadik Armagan045f6be2020-09-10 13:37:32 +010015#include <armnn/utility/IgnoreUnused.hpp>
16#include <armnn/utility/PolymorphicDowncast.hpp>
17
Matteo Martincighd95e9062019-01-31 15:35:59 +000018#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010019#include <aclCommon/ArmComputeUtils.hpp>
Aron Virginas-Tar710f6642019-11-27 14:48:32 +000020#include <aclCommon/ArmComputeTensorUtils.hpp>
Aron Virginas-Tar82046942019-09-09 15:18:29 +010021#include "workloads/ClAbsWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010022#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010023#include "workloads/ClActivationWorkload.hpp"
James Conroy2dc05722019-09-19 17:00:31 +010024#include "workloads/ClArgMinMaxWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010025#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000026#include "workloads/ClBatchToSpaceNdWorkload.hpp"
Sadik Armaganf40d6d42021-04-22 09:12:11 +010027#include "workloads/ClCastWorkload.hpp"
Teresa Charlin1222dbd2021-09-02 13:58:52 +010028#include "workloads/ClChannelShuffleWorkload.hpp"
Teresa Charlin2b030d92020-03-27 16:40:56 +000029#include "workloads/ClComparisonWorkload.hpp"
Mike Kelly0886ac42020-04-27 09:55:40 +010030#include "workloads/ClConstantWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010031#include "workloads/ClConvertFp16ToFp32Workload.hpp"
32#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010033#include "workloads/ClConvolution2dWorkload.hpp"
Teresa Charlin615ad6c2021-10-26 12:22:20 +010034#include "workloads/ClConvolution3dWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010035#include "workloads/ClDepthToSpaceWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010036#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
Aron Virginas-Tarb2801962019-09-30 11:24:53 +010037#include "workloads/ClDequantizeWorkload.hpp"
Teresa Charline11e63d2021-04-21 12:56:45 +010038#include "workloads/ClDivisionWorkload.hpp"
Sadik Armagan9fabf432020-05-27 13:40:58 +010039#include "workloads/ClExpWorkload.hpp"
Sadik Armagan66aecb02020-06-24 11:42:20 +010040#include "workloads/ClFillWorkload.hpp"
Sadik Armagan9be49162019-10-30 16:15:26 +000041#include "workloads/ClFloorFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010042#include "workloads/ClFullyConnectedWorkload.hpp"
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +010043#include "workloads/ClGatherWorkload.hpp"
Aron Virginas-Tar8168f402019-10-04 13:10:16 +010044#include "workloads/ClInstanceNormalizationWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010045#include "workloads/ClL2NormalizationFloatWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010046#include "workloads/ClLogWorkload.hpp"
Teresa Charlin8398edc2020-07-20 14:23:02 +010047#include "workloads/ClLogSoftmaxWorkload.hpp"
James Conroyfe3ec942020-11-18 14:20:53 +000048#include "workloads/ClLogicalAndWorkload.hpp"
49#include "workloads/ClLogicalNotWorkload.hpp"
50#include "workloads/ClLogicalOrWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010051#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000052#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010053#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010054#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000055#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010056#include "workloads/ClMultiplicationWorkload.hpp"
Sadik Armaganac472102020-03-24 09:54:36 +000057#include "workloads/ClNegWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010058#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010059#include "workloads/ClPadWorkload.hpp"
60#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010061#include "workloads/ClPooling2dWorkload.hpp"
Nikhil Raj91e4c6d2019-07-05 12:22:58 +010062#include "workloads/ClPreluWorkload.hpp"
Ryan OShea2323af42020-05-13 16:36:19 +010063#include "workloads/ClQLstmWorkload.hpp"
64#include "workloads/ClQuantizedLstmWorkload.hpp"
65#include "workloads/ClQuantizeWorkload.hpp"
Sadik Armagana2747482021-02-09 10:28:54 +000066#include "workloads/ClReduceWorkload.hpp"
Kevin Maya023c402019-12-12 17:28:05 +000067#include "workloads/ClReshapeWorkload.hpp"
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +010068#include "workloads/ClResizeWorkload.hpp"
Aron Virginas-Tar1a763dd2019-09-10 12:32:08 +010069#include "workloads/ClRsqrtWorkload.hpp"
Teresa Charlin50de4fa2021-05-31 18:47:33 +010070#include "workloads/ClSinWorkload.hpp"
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +000071#include "workloads/ClSliceWorkload.hpp"
Teresa Charlinc1f6b092020-05-11 16:10:38 +010072#include "workloads/ClSoftmaxWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000073#include "workloads/ClSpaceToBatchNdWorkload.hpp"
James Conroyd2aa85e2019-07-01 17:12:40 +010074#include "workloads/ClSpaceToDepthWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010075#include "workloads/ClSplitterWorkload.hpp"
Matthew Jacksond5166102019-07-31 14:06:28 +010076#include "workloads/ClStackWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000077#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010078#include "workloads/ClSubtractionWorkload.hpp"
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +010079#include "workloads/ClTransposeConvolution2dWorkload.hpp"
Mike Kellyc9ea45a2020-02-28 18:11:58 +000080#include "workloads/ClTransposeWorkload.hpp"
Cathal Corbett4952a3e2022-03-03 15:14:18 +000081#include "workloads/ClUnidirectionalSequenceLstmFloatWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000082#endif
83
telsoa014fcda012018-03-09 14:13:49 +000084
85namespace armnn
86{
arovir017c22c702018-10-09 11:16:46 +010087
telsoa014fcda012018-03-09 14:13:49 +000088namespace
89{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010090
telsoa014fcda012018-03-09 14:13:49 +000091template<unsigned int FilterSize>
92bool IsMatchingSize2d(const TensorInfo& weightInfo)
93{
telsoa01c577f2c2018-08-31 09:22:23 +010094 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000095 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
96}
97
// Base case: a stride matches a single candidate value.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// Recursive case: a stride matches if it equals any value in the pack.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +0000109
// Compile-time gate for the CL backend: always true when the library was built
// with ARMCOMPUTECL_ENABLED, otherwise false with an explanatory reason.
// The trailing args are accepted (and ignored) so the FORWARD_* macros can pass
// through their argument lists unchanged.
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
124
// Evaluates "expr" only in CL-enabled builds; otherwise reports the
// backend-not-built reason via IsClBackendSupported.
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// Runs an ACL ...Validate function and converts its arm_compute::Status into a
// bool, copying the ACL error description into reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: these macros expand to a *return* statement, so every IsXxxSupported
// body below that uses them terminates at the macro invocation.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
150
// Data-type based support check: requires the CL backend to be built in, then
// dispatches per dataType via IsSupportedForDataTypeGeneric. The same
// floatFuncPtr is passed twice (presumably covering both Float16 and Float32
// slots of the generic dispatcher), and the two FalseFunc entries reject the
// remaining data types — TODO(review): confirm slot order against
// IsSupportedForDataTypeGeneric's signature.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100168} // anonymous namespace
169
// Constructs layer support with a backend-specific model context; the context
// (when it is a ClBackendModelContext) supplies model options such as fast-math.
ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}
174
// Default construction: no model context, so option-dependent checks (e.g.
// fast-math in IsConvolution2dSupported) fall back to their defaults.
ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
179
// Generic entry point: dispatches a (LayerType, TensorInfos, descriptor) query
// to the matching Is<Layer>Supported overload of this class (or to
// LayerSupportBase for layers the CL backend merely passes through).
//
// Conventions visible in the cases below:
//  - "infos" is ordered inputs first, outputs last; unary layers use
//    {input, output}, binary layers {input0, input1, output}.
//  - Convolution-style layers expect exactly 4 infos
//    {input, output, weights, biases}; a default-constructed TensorInfo in the
//    biases slot means "no bias".
//  - "descriptor" is downcast to the layer-specific descriptor type, so the
//    caller must pass the descriptor matching "type".
//  - lstmParamsInfo / quantizedLstmParamsInfo must hold a value for the
//    LSTM-family cases (value() is called unconditionally there).
bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchNormalization:
            // infos: {input, output, mean, variance, beta, gamma}
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            // All infos but the last are inputs; the last is the output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertBf16ToFp32:
            // BFloat16 conversions are handled by the base class, not a CL workload.
            return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        case LayerType::ConvertFp32ToBf16:
            return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
            // A default-constructed TensorInfo in the biases slot means "no bias".
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            // infos: {input, output, weights, biases}
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            // Requires lstmParamsInfo to be populated by the caller.
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::Map:
            // Map/Unmap are always supported on this backend.
            return true;
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Multiplication:
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            // Requires quantizedLstmParamsInfo to be populated by the caller.
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            // infos[0] is the input; all remaining infos are the split outputs.
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            // All infos but the last are inputs; the last is the stacked output.
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::UnidirectionalSequenceLstm:
            return IsUnidirectionalSequenceLstmSupported(infos[0],
                                                         infos[1],
                                                         infos[2],
                                                         infos[3],
                                                         infos[4],
                                                         infos[5],
                                                         *(PolymorphicDowncast<const
                                                             UnidirectionalSequenceLstmDescriptor*>(&descriptor)),
                                                         lstmParamsInfo.value(),
                                                         reasonIfUnsupported);
        case LayerType::Unmap:
            return true;
        default:
            // layers not supported in cl by default:
            // debug, detectionpostprocess, fakequantization,
            // precompiled, standin, switch, pooling3d
            return false;
    }
}
599
// Forwards to the ACL activation validate; the macro returns on our behalf and
// reports "built without CL support" in non-CL builds.
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
611
// Forwards to the ACL addition validate. The trailing nullptr is an extra
// argument of ClAdditionValidate — presumably the (absent) fused activation
// descriptor, matching the nullptr passed by other validates in this file;
// TODO(review): confirm against ClAdditionWorkload.hpp.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}
624
// Forwards to the ACL ArgMinMax validate.
bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
637
// Forwards to the ACL batch-normalization validate. The trailing nullptr is an
// extra argument of ClBatchNormalizationValidate — presumably the (absent)
// fused activation descriptor; TODO(review): confirm against the workload header.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}
658
// Forwards to the ACL BatchToSpaceNd validate.
bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
670
// Forwards to the ACL cast validate (no descriptor: the conversion is implied
// by the input/output data types).
bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
680
// Forwards to the ACL channel-shuffle validate.
bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ChannelShuffleDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
692
// Forwards to the ACL comparison validate (operation chosen by the descriptor).
bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}
706
// Concat support. The axis is validated here before deferring to ACL:
//  - the concat axis must be a valid dimension index;
//  - concatenation along width/height/channels (innermost three axes) is
//    delegated to the ACL validate;
//  - concatenation along the batch axis of a 4D tensor relies on the
//    sub-tensor optimization, which requires all inputs to share the output's
//    type/quantization space;
//  - anything deeper than 4 dimensions is rejected.
bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    // Distance of the concat axis from the innermost dimension (0 == innermost).
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if(concatInnerAxis < 3) // Width, height, or channels
    {
        // Macro returns from this function.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}
747
// Forwards to the ACL constant validate (only the output info is relevant).
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}
755
// Forwards to the ACL FP16 -> FP32 conversion validate.
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
765
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100766bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
767 const TensorInfo& output,
768 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000769{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100770 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
771 reasonIfUnsupported,
772 input,
773 output);
telsoa014fcda012018-03-09 14:13:49 +0000774}
775
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100776bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
777 const TensorInfo& output,
778 const Convolution2dDescriptor& descriptor,
779 const TensorInfo& weights,
780 const Optional<TensorInfo>& biases,
781 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000782{
Sadik Armagan045f6be2020-09-10 13:37:32 +0100783 bool isFastMathEnabled = false;
784#if defined(ARMCOMPUTECL_ENABLED)
785 if (m_ModelContextPtr)
786 {
787 if (m_ModelContextPtr.get() != nullptr)
788 {
789 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
790 if (modelOptions)
791 {
792 isFastMathEnabled = modelOptions->IsFastMathEnabled();
793 }
794 }
795 }
796#endif
797
surmeh013537c2c2018-05-18 16:31:43 +0100798 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
799 reasonIfUnsupported,
800 input,
801 output,
802 descriptor,
803 weights,
Sadik Armagan045f6be2020-09-10 13:37:32 +0100804 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000805 isFastMathEnabled,
806 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000807}
808
Teresa Charlin615ad6c2021-10-26 12:22:20 +0100809bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
810 const TensorInfo& output,
811 const Convolution3dDescriptor& descriptor,
812 const TensorInfo& weights,
813 const Optional<TensorInfo>& biases,
814 Optional<std::string&> reasonIfUnsupported) const
815{
816 bool isFastMathEnabled = false;
817#if defined(ARMCOMPUTECL_ENABLED)
818 if (m_ModelContextPtr)
819{
820 if (m_ModelContextPtr.get() != nullptr)
821 {
822 auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
823 if (modelOptions)
824 {
825 isFastMathEnabled = modelOptions->IsFastMathEnabled();
826 }
827 }
828}
829#endif
830
831 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
832 reasonIfUnsupported,
833 input,
834 output,
835 descriptor,
836 weights,
837 biases,
838 isFastMathEnabled,
839 nullptr);
840}
841
Jim Flynn983daec2019-05-29 16:20:16 +0100842bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
843 const TensorInfo& output,
844 Optional<std::string&> reasonIfUnsupported) const
845{
846 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
847 reasonIfUnsupported,
848 input,
849 output);
850}
851
Aron Virginas-Tarb2801962019-09-30 11:24:53 +0100852bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
853 const TensorInfo& output,
854 const DepthToSpaceDescriptor& descriptor,
855 Optional<std::string&> reasonIfUnsupported) const
856{
857 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
858 reasonIfUnsupported,
859 input,
860 output,
861 descriptor);
862}
863
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100864bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
865 const TensorInfo& output,
866 const DepthwiseConvolution2dDescriptor& descriptor,
867 const TensorInfo& weights,
868 const Optional<TensorInfo>& biases,
869 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000870{
telsoa01c577f2c2018-08-31 09:22:23 +0100871 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
872 reasonIfUnsupported,
873 input,
874 output,
875 descriptor,
876 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000877 biases,
878 nullptr);
telsoa014fcda012018-03-09 14:13:49 +0000879}
880
Pablo Tellof0bd6832019-04-26 17:58:13 +0100881bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
882 const TensorInfo& output,
883 const DepthwiseConvolution2dDescriptor& descriptor,
884 const TensorInfo& weights,
885 const Optional<TensorInfo>& biases,
886 Optional<std::string&> reasonIfUnsupported) const
887{
888 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
889 reasonIfUnsupported,
890 input,
891 output,
892 descriptor,
893 weights,
Mike Kelly07810fc2020-11-12 10:58:48 +0000894 biases,
895 nullptr);
Pablo Tellof0bd6832019-04-26 17:58:13 +0100896}
897
898
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100899bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
900 const TensorInfo& input1,
901 const TensorInfo& output,
902 Optional<std::string&> reasonIfUnsupported) const
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100903{
904 FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
905 reasonIfUnsupported,
906 input0,
907 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +0000908 output,
909 nullptr);
Francis Murtaghe7a86a42018-08-29 12:42:10 +0100910}
911
// Support check for a unary element-wise layer: dispatch on the requested operation
// to the matching ACL workload validator.
// NOTE(review): every case relies on FORWARD_WORKLOAD_VALIDATE_FUNC expanding to a
// return statement (there are no break statements) - confirm in LayerSupportCommon.hpp.
bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch(descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        // Any other unary operation has no CL implementation here.
        default:
            return false;
    }
}
958
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100959bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
960 const TensorInfo& output,
961 const FillDescriptor& descriptor,
962 Optional<std::string&> reasonIfUnsupported) const
Sadik Armagan66aecb02020-06-24 11:42:20 +0100963{
Teresa Charlin4b10fef2020-07-29 09:36:41 +0100964 armnn::IgnoreUnused(input);
965 armnn::IgnoreUnused(output);
966 armnn::IgnoreUnused(descriptor);
967
968 return IsClBackendSupported(reasonIfUnsupported);
Sadik Armagan66aecb02020-06-24 11:42:20 +0100969}
970
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100971bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
972 const TensorInfo& output,
973 Optional<std::string&> reasonIfUnsupported) const
telsoa014fcda012018-03-09 14:13:49 +0000974{
Sadik Armagan9be49162019-10-30 16:15:26 +0000975 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
976 reasonIfUnsupported,
977 input,
978 output);
telsoa01c577f2c2018-08-31 09:22:23 +0100979}
980
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100981bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
982 const TensorInfo& output,
983 const TensorInfo& weights,
984 const TensorInfo& biases,
985 const FullyConnectedDescriptor& descriptor,
986 Optional<std::string&> reasonIfUnsupported) const
987{
988 FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
989 reasonIfUnsupported,
990 input,
991 output,
992 weights,
993 biases,
Mike Kelly07810fc2020-11-12 10:58:48 +0000994 descriptor,
995 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100996}
997
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +0100998bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
999 const TensorInfo& input1,
1000 const TensorInfo& output,
Teresa Charlin52664732020-06-29 16:27:03 +01001001 const GatherDescriptor& descriptor,
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001002 Optional<std::string&> reasonIfUnsupported) const
1003{
1004 FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
1005 reasonIfUnsupported,
1006 input0,
1007 input1,
Teresa Charlin52664732020-06-29 16:27:03 +01001008 output,
1009 descriptor);
Teresa Charlin9ad2e5b2020-04-10 22:34:48 +01001010}
1011
// Support check for an Input layer: any tensor is acceptable as long as the CL
// backend itself is available (no workload validation needed).
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}
1017
Aron Virginas-Tar8168f402019-10-04 13:10:16 +01001018bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
1019 const TensorInfo& output,
1020 const InstanceNormalizationDescriptor& descriptor,
1021 Optional<std::string&> reasonIfUnsupported) const
1022{
1023 FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
1024 reasonIfUnsupported,
1025 input,
1026 output,
1027 descriptor);
1028}
1029
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001030bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
1031 const TensorInfo& output,
1032 const L2NormalizationDescriptor& descriptor,
1033 Optional<std::string&> reasonIfUnsupported) const
1034{
1035 FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
1036 reasonIfUnsupported,
1037 input,
1038 output,
1039 descriptor);
1040}
1041
// Support check for a binary logical layer (AND/OR): dispatch on the operation to
// the matching ACL validator; anything else is unsupported.
// NOTE(review): each case relies on FORWARD_WORKLOAD_VALIDATE_FUNC expanding to a
// return statement, so no break is needed - confirm in LayerSupportCommon.hpp.
bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    // NOTE(review): output IS passed to the validators below; presumably this guard
    // silences unused-parameter warnings only in non-CL builds where the macro
    // discards its arguments - verify against the macro definition.
    IgnoreUnused(output);

    switch(descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}
1068
1069
Teresa Charlin8398edc2020-07-20 14:23:02 +01001070bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
1071 const TensorInfo& output,
1072 const LogSoftmaxDescriptor& descriptor,
1073 Optional<std::string&> reasonIfUnsupported) const
1074{
1075 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
1076 reasonIfUnsupported,
1077 input,
1078 output,
1079 descriptor);
1080}
1081
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001082bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
1083 const TensorInfo& outputStateIn,
1084 const TensorInfo& cellStateIn,
1085 const TensorInfo& scratchBuffer,
1086 const TensorInfo& outputStateOut,
1087 const TensorInfo& cellStateOut,
1088 const TensorInfo& output,
1089 const LstmDescriptor& descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001090 const LstmInputParamsInfo& paramsInfo,
1091 Optional<std::string&> reasonIfUnsupported) const
telsoa01c577f2c2018-08-31 09:22:23 +01001092{
arovir01085f0a42018-10-08 14:48:19 +01001093 FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
1094 reasonIfUnsupported,
1095 input,
1096 outputStateIn,
1097 cellStateIn,
1098 scratchBuffer,
1099 outputStateOut,
1100 cellStateOut,
1101 output,
1102 descriptor,
Jan Eilersd01a83c2019-07-03 18:20:40 +01001103 paramsInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001104}
1105
keidav01a959ee52018-12-19 10:04:58 +00001106bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
1107 const TensorInfo& input1,
1108 const TensorInfo& output,
1109 Optional<std::string&> reasonIfUnsupported) const
1110{
1111 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
1112 reasonIfUnsupported,
1113 input0,
1114 input1,
1115 output);
1116}
1117
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001118bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
1119 const TensorInfo& output,
1120 const MeanDescriptor& descriptor,
1121 Optional<std::string&> reasonIfUnsupported) const
narpra0132b90462018-09-13 11:07:48 +01001122{
Matteo Martincigh28dcab62018-10-19 16:40:03 +01001123 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
1124 reasonIfUnsupported,
1125 input,
1126 output,
1127 descriptor);
narpra0132b90462018-09-13 11:07:48 +01001128}
1129
saoste019292aa32019-01-08 13:55:59 +00001130bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
1131 const TensorInfo& input1,
1132 const TensorInfo& output,
1133 Optional<std::string&> reasonIfUnsupported) const
1134{
1135 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
1136 reasonIfUnsupported,
1137 input0,
1138 input1,
1139 output);
1140}
1141
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001142bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
1143 const TensorInfo& input1,
1144 const TensorInfo& output,
1145 Optional<std::string&> reasonIfUnsupported) const
1146{
1147 FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
1148 reasonIfUnsupported,
1149 input0,
1150 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001151 output,
1152 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001153}
1154
1155bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
1156 const TensorInfo& output,
1157 const NormalizationDescriptor& descriptor,
1158 Optional<std::string&> reasonIfUnsupported) const
1159{
1160 FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1161}
1162
// Support check for an Output layer: any tensor is acceptable as long as the CL
// backend itself is available (no workload validation needed).
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}
1168
1169bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
1170 const TensorInfo& output,
1171 const PadDescriptor& descriptor,
1172 Optional<std::string&> reasonIfUnsupported) const
arovir01085f0a42018-10-08 14:48:19 +01001173{
1174 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
1175 reasonIfUnsupported,
1176 input,
1177 output,
1178 descriptor);
1179}
1180
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001181bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
1182 const TensorInfo& output,
1183 const PermuteDescriptor& descriptor,
1184 Optional<std::string&> reasonIfUnsupported) const
1185{
Matthew Bentham9820d302019-11-27 17:24:47 +00001186 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
telsoa014fcda012018-03-09 14:13:49 +00001187}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001188
1189bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
1190 const TensorInfo& output,
1191 const Pooling2dDescriptor& descriptor,
1192 Optional<std::string&> reasonIfUnsupported) const
1193{
1194 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1195}
1196
Nikhil Raj91e4c6d2019-07-05 12:22:58 +01001197bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
1198 const armnn::TensorInfo &alpha,
1199 const armnn::TensorInfo &output,
1200 armnn::Optional<std::string &> reasonIfUnsupported) const
1201{
1202 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
1203}
1204
Ryan OShea2323af42020-05-13 16:36:19 +01001205bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
1206 const TensorInfo& previousOutputIn,
1207 const TensorInfo& previousCellStateIn,
1208 const TensorInfo& outputStateOut,
1209 const TensorInfo& cellStateOut,
1210 const TensorInfo& output,
1211 const QLstmDescriptor& descriptor,
1212 const LstmInputParamsInfo& paramsInfo,
1213 Optional<std::string&> reasonIfUnsupported) const
1214{
1215 if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
1216 previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
1217 previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
1218 outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
1219 cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
1220 output.GetDataType() == armnn::DataType::QAsymmS8)
1221 {
1222 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
1223 reasonIfUnsupported,
1224 input,
1225 previousCellStateIn,
1226 previousOutputIn,
1227 cellStateOut,
1228 outputStateOut,
1229 output,
1230 descriptor,
1231 paramsInfo);
1232 }
1233 else
1234 {
1235 return false;
1236 }
1237}
1238
Ferran Balaguer737d9ff2019-08-01 09:58:08 +01001239bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
1240 const TensorInfo& previousCellStateIn,
1241 const TensorInfo& previousOutputIn,
1242 const TensorInfo& cellStateOut,
1243 const TensorInfo& output,
1244 const QuantizedLstmInputParamsInfo& paramsInfo,
1245 Optional<std::string&> reasonIfUnsupported) const
1246{
1247 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
1248 reasonIfUnsupported,
1249 input,
1250 previousCellStateIn,
1251 previousOutputIn,
1252 cellStateOut,
1253 output,
1254 paramsInfo);
1255}
1256
Sadik Armagan20ec2492019-05-31 09:09:44 +01001257bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
1258 const TensorInfo& output,
1259 Optional<std::string&> reasonIfUnsupported) const
1260{
1261 FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
1262 reasonIfUnsupported,
1263 input,
1264 output);
1265}
1266
Sadik Armagana2747482021-02-09 10:28:54 +00001267bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
1268 const TensorInfo& output,
1269 const ReduceDescriptor& descriptor,
1270 Optional<std::string&> reasonIfUnsupported) const
1271{
1272 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
1273 reasonIfUnsupported,
1274 input,
1275 output,
1276 descriptor);
1277}
1278
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001279bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Kevin Maya023c402019-12-12 17:28:05 +00001280 const TensorInfo& output,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +00001281 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001282 Optional<std::string&> reasonIfUnsupported) const
1283{
Jan Eilers8eb25602020-03-09 12:13:48 +00001284 IgnoreUnused(descriptor);
Kevin Maya023c402019-12-12 17:28:05 +00001285 FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001286}
1287
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001288bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
1289 const TensorInfo& output,
1290 const ResizeDescriptor& descriptor,
1291 Optional<std::string&> reasonIfUnsupported) const
1292{
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +01001293 FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tar169d2f12019-07-01 19:01:44 +01001294}
1295
Aron Virginas-Tar94c4fef2019-11-25 15:37:08 +00001296bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
1297 const TensorInfo& output,
1298 const SliceDescriptor& descriptor,
1299 Optional<std::string&> reasonIfUnsupported) const
1300{
1301 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1302}
1303
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001304bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
1305 const TensorInfo& output,
1306 const SoftmaxDescriptor& descriptor,
1307 Optional<std::string&> reasonIfUnsupported) const
1308{
Francis Murtagh3b938352019-07-26 15:44:17 +01001309 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001310}
1311
Sadik Armaganf4464322018-12-20 16:19:12 +00001312bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
1313 const TensorInfo& output,
1314 const SpaceToBatchNdDescriptor& descriptor,
1315 Optional<std::string&> reasonIfUnsupported) const
1316{
1317 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
1318 reasonIfUnsupported,
1319 input,
1320 output,
1321 descriptor);
1322}
1323
James Conroyd2aa85e2019-07-01 17:12:40 +01001324bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
1325 const TensorInfo& output,
1326 const SpaceToDepthDescriptor& descriptor,
1327 Optional<std::string&> reasonIfUnsupported) const
1328{
1329 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
1330 reasonIfUnsupported,
1331 input,
1332 output,
1333 descriptor);
1334}
1335
// Support check for a Splitter layer. Two paths:
//  * splitting along the innermost dimension (where sub-tensors cannot be used)
//    is validated by the ACL splitter workload;
//  * every other split falls through to the sub-tensor path, which only requires
//    that each output share the input's type/quantization space.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    // NOTE(review): ComputeSplitAxis presumably returns the set of axes the views
    // actually split on - confirm in aclCommon/ArmComputeUtils.hpp.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // The macro returns from this function with the validator's verdict.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Silences the unused-parameter warning in builds without ARMCOMPUTECL_ENABLED,
    // where the block above (the only use of descriptor) is compiled out.
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    // Sub-tensors handle the remaining split configurations.
    return true;
}
1368
Matthew Jacksond5166102019-07-31 14:06:28 +01001369bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
1370 const TensorInfo& output,
1371 const StackDescriptor& descriptor,
1372 Optional<std::string&> reasonIfUnsupported) const
1373{
1374 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
1375 reasonIfUnsupported,
1376 inputs,
1377 output,
1378 descriptor);
1379}
1380
keidav01d74dc912018-12-10 18:16:07 +00001381bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
1382 const TensorInfo& output,
1383 const StridedSliceDescriptor& descriptor,
1384 Optional<std::string&> reasonIfUnsupported) const
1385{
1386 FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
1387 reasonIfUnsupported,
1388 input,
1389 output,
1390 descriptor);
1391}
1392
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001393bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
1394 const TensorInfo& input1,
1395 const TensorInfo& output,
1396 Optional<std::string&> reasonIfUnsupported) const
1397{
1398 FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
1399 reasonIfUnsupported,
1400 input0,
1401 input1,
Mike Kelly07810fc2020-11-12 10:58:48 +00001402 output,
1403 nullptr);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001404}
1405
Aron Virginas-Tar7a3e2fe2019-06-27 18:54:47 +01001406bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
1407 const TensorInfo& output,
1408 const TransposeConvolution2dDescriptor& descriptor,
1409 const TensorInfo& weights,
1410 const Optional<TensorInfo>& biases,
1411 Optional<std::string&> reasonIfUnsupported) const
1412{
1413 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
1414 reasonIfUnsupported,
1415 input,
1416 output,
1417 descriptor,
1418 weights,
1419 biases);
1420}
1421
Mike Kellyc9ea45a2020-02-28 18:11:58 +00001422bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
1423 const TensorInfo& output,
1424 const TransposeDescriptor& descriptor,
1425 Optional<std::string&> reasonIfUnsupported) const
1426{
1427 FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
1428}
1429
Cathal Corbett4952a3e2022-03-03 15:14:18 +00001430bool ClLayerSupport::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
1431 const TensorInfo& outputStateIn,
1432 const TensorInfo& cellStateIn,
1433 const TensorInfo& output,
1434 const Optional<TensorInfo>& hiddenStateOutput,
1435 const Optional<TensorInfo>& cellStateOutput,
1436 const UnidirectionalSequenceLstmDescriptor& descriptor,
1437 const LstmInputParamsInfo& paramsInfo,
1438 Optional<std::string&> reasonIfUnsupported) const
1439{
1440 FORWARD_WORKLOAD_VALIDATE_FUNC(ClUnidirectionalSequenceLstmFloatWorkloadValidate,
1441 reasonIfUnsupported,
1442 input,
1443 outputStateIn,
1444 cellStateIn,
1445 output,
1446 hiddenStateOutput,
1447 cellStateOutput,
1448 descriptor,
1449 paramsInfo);
1450}
1451
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +01001452} // namespace armnn