//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
#include "ClBackendModelContext.hpp"

#include <armnn/BackendRegistry.hpp>

#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include "workloads/ClAbsWorkload.hpp"
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClArgMinMaxWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClCastWorkload.hpp"
#include "workloads/ClChannelShuffleWorkload.hpp"
#include "workloads/ClComparisonWorkload.hpp"
#include "workloads/ClConstantWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClConvolution3dWorkload.hpp"
#include "workloads/ClDepthToSpaceWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDivisionWorkload.hpp"
#include "workloads/ClExpWorkload.hpp"
#include "workloads/ClFillWorkload.hpp"
#include "workloads/ClFloorFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGatherWorkload.hpp"
#include "workloads/ClInstanceNormalizationWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLogWorkload.hpp"
#include "workloads/ClLogSoftmaxWorkload.hpp"
#include "workloads/ClLogicalAndWorkload.hpp"
#include "workloads/ClLogicalNotWorkload.hpp"
#include "workloads/ClLogicalOrWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNegWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
#include "workloads/ClQLstmWorkload.hpp"
#include "workloads/ClQuantizedLstmWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClReduceWorkload.hpp"
#include "workloads/ClReshapeWorkload.hpp"
#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClRsqrtWorkload.hpp"
#include "workloads/ClSinWorkload.hpp"
#include "workloads/ClSliceWorkload.hpp"
#include "workloads/ClSoftmaxWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSpaceToDepthWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClStackWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
#include "workloads/ClTransposeWorkload.hpp"
#endif

namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}
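// Example: IsMatchingSize2d<3>(weightInfo) accepts only 3x3 filters; dimensions
// 2 and 3 of the 4-D weight tensor are taken to be its height and width.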

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
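// The recursion unrolls at compile time: IsMatchingStride<1, 2, 3>(s)
// expands to (s == 1) || (s == 2) || (s == 3).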

template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
    IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
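// In a build without CL support the macro ignores its argument entirely and
// falls back to IsClBackendSupported, which reports the reason string above.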

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
#endif
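// Note that the macro expands to a return statement, so each Is*Supported
// method that invokes it returns immediately. Typical use, as in
// IsActivationSupported further down:
//     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
//                                    reasonIfUnsupported, input, output, descriptor);
// forwards the arguments to the Compute Library validate function and converts
// the resulting arm_compute::Status into a bool plus reason string.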

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
} // anonymous namespace

ClLayerSupport::ClLayerSupport(const IBackendInternal::IBackendSpecificModelContextPtr& modelContextPtr)
    : m_ModelContextPtr(modelContextPtr)
{
}

ClLayerSupport::ClLayerSupport()
    : m_ModelContextPtr(nullptr)
{
}
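// Illustrative caller-side sketch (not part of this file; variable names are
// assumed):
//     ClLayerSupport support;
//     std::string reason;
//     bool ok = support.IsLayerSupported(LayerType::Activation,
//                                        {inputInfo, outputInfo},
//                                        ActivationDescriptor(),
//                                        EmptyOptional(),
//                                        EmptyOptional(),
//                                        Optional<std::string&>(reason));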

bool ClLayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
                                      const BaseDescriptor& descriptor,
                                      const Optional<LstmInputParamsInfo>& lstmParamsInfo,
                                      const Optional<QuantizedLstmInputParamsInfo>& quantizedLstmParamsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
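    // Convention for the infos vector: inputs first, then outputs. Convolution-style
    // layers additionally append weights and biases ({input, output, weights, biases}),
    // while Concat and Stack pack all inputs first with the output last.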
    switch (type)
    {
        case LayerType::Activation:
            return IsActivationSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const ActivationDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Addition:
            return IsAdditionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ArgMinMax:
            return IsArgMinMaxSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const ArgMinMaxDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::BatchNormalization:
            return IsBatchNormalizationSupported(infos[0],
                                                 infos[1],
                                                 infos[2],
                                                 infos[3],
                                                 infos[4],
                                                 infos[5],
                                                 *(PolymorphicDowncast<const BatchNormalizationDescriptor*>
                                                     (&descriptor)),
                                                 reasonIfUnsupported);
        case LayerType::BatchToSpaceNd:
            return IsBatchToSpaceNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const BatchToSpaceNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Comparison:
            return IsComparisonSupported(infos[0],
                                         infos[1],
                                         infos[2],
                                         *(PolymorphicDowncast<const ComparisonDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Concat:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < (infos.size() - 1); i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsConcatSupported(inputInfos,
                                     infos[infos.size() - 1],
                                     *(PolymorphicDowncast<const OriginsDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        }
        case LayerType::Constant:
            return IsConstantSupported(infos[0], reasonIfUnsupported);
        case LayerType::ConvertFp16ToFp32:
            return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ConvertFp32ToFp16:
            return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Convolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
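            // A default-constructed TensorInfo in the biases slot marks a layer
            // with no bias, so an EmptyOptional is forwarded instead. The same
            // pattern applies to the other convolution-style cases below.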
            if (infos[3] == TensorInfo())
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution2dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::DepthToSpace:
            return IsDepthToSpaceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const DepthToSpaceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::DepthwiseConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of DepthwiseConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       EmptyOptional(),
                                                       reasonIfUnsupported);
            }
            else
            {
                return IsDepthwiseConvolutionSupported(infos[0],
                                                       infos[1],
                                                       desc,
                                                       infos[2],
                                                       infos[3],
                                                       reasonIfUnsupported);
            }
        }
        case LayerType::Dequantize:
            return IsDequantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Division:
            return IsDivisionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::ElementwiseUnary:
            return IsElementwiseUnarySupported(infos[0],
                                               infos[1],
                                               *(PolymorphicDowncast<const ElementwiseUnaryDescriptor*>(&descriptor)),
                                               reasonIfUnsupported);
        case LayerType::Fill:
            return IsFillSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const FillDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Floor:
            return IsFloorSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::FullyConnected:
            return IsFullyConnectedSupported(infos[0],
                                             infos[1],
                                             infos[2],
                                             infos[3],
                                             *(PolymorphicDowncast<const FullyConnectedDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Gather:
            return IsGatherSupported(infos[0],
                                     infos[1],
                                     infos[2],
                                     *(PolymorphicDowncast<const GatherDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Input:
            return IsInputSupported(infos[0], reasonIfUnsupported);
        case LayerType::InstanceNormalization:
            return IsInstanceNormalizationSupported(infos[0],
                                                    infos[1],
                                                    *(PolymorphicDowncast<const InstanceNormalizationDescriptor*>
                                                        (&descriptor)),
                                                    reasonIfUnsupported);
        case LayerType::L2Normalization:
            return IsL2NormalizationSupported(infos[0],
                                              infos[1],
                                              *(PolymorphicDowncast<const L2NormalizationDescriptor*>(&descriptor)),
                                              reasonIfUnsupported);
        case LayerType::LogicalBinary:
            return IsLogicalBinarySupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            *(PolymorphicDowncast<const LogicalBinaryDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::LogSoftmax:
            return IsLogSoftmaxSupported(infos[0],
                                         infos[1],
                                         *(PolymorphicDowncast<const LogSoftmaxDescriptor*>(&descriptor)),
                                         reasonIfUnsupported);
        case LayerType::Lstm:
            return IsLstmSupported(infos[0],
                                   infos[1],
                                   infos[2],
                                   infos[3],
                                   infos[4],
                                   infos[5],
                                   infos[6],
                                   *(PolymorphicDowncast<const LstmDescriptor*>(&descriptor)),
                                   lstmParamsInfo.value(),
                                   reasonIfUnsupported);
        case LayerType::QLstm:
            return IsQLstmSupported(infos[0],
                                    infos[1],
                                    infos[2],
                                    infos[3],
                                    infos[4],
                                    infos[5],
                                    *(PolymorphicDowncast<const QLstmDescriptor*>(&descriptor)),
                                    lstmParamsInfo.value(),
                                    reasonIfUnsupported);
        case LayerType::Maximum:
            return IsMaximumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Mean:
            return IsMeanSupported(infos[0],
                                   infos[1],
                                   *(PolymorphicDowncast<const MeanDescriptor*>(&descriptor)),
                                   reasonIfUnsupported);
        case LayerType::Minimum:
            return IsMinimumSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Multiplication:
            return IsMultiplicationSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Normalization:
            return IsNormalizationSupported(infos[0],
                                            infos[1],
                                            *(PolymorphicDowncast<const NormalizationDescriptor*>(&descriptor)),
                                            reasonIfUnsupported);
        case LayerType::Output:
            return IsOutputSupported(infos[0], reasonIfUnsupported);
        case LayerType::Pad:
            return IsPadSupported(infos[0],
                                  infos[1],
                                  *(PolymorphicDowncast<const PadDescriptor*>(&descriptor)),
                                  reasonIfUnsupported);
        case LayerType::Permute:
            return IsPermuteSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const PermuteDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Pooling2d:
            return IsPooling2dSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const Pooling2dDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::Prelu:
            return IsPreluSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Quantize:
            return IsQuantizeSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::QuantizedLstm:
            return IsQuantizedLstmSupported(infos[0],
                                            infos[1],
                                            infos[2],
                                            infos[3],
                                            infos[4],
                                            quantizedLstmParamsInfo.value(),
                                            reasonIfUnsupported);
        case LayerType::Reshape:
            return IsReshapeSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const ReshapeDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::Resize:
            return IsResizeSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ResizeDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Reduce:
            return IsReduceSupported(infos[0],
                                     infos[1],
                                     *(PolymorphicDowncast<const ReduceDescriptor*>(&descriptor)),
                                     reasonIfUnsupported);
        case LayerType::Slice:
            return IsSliceSupported(infos[0],
                                    infos[1],
                                    *(PolymorphicDowncast<const SliceDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        case LayerType::Softmax:
            return IsSoftmaxSupported(infos[0],
                                      infos[1],
                                      *(PolymorphicDowncast<const SoftmaxDescriptor*>(&descriptor)),
                                      reasonIfUnsupported);
        case LayerType::SpaceToBatchNd:
            return IsSpaceToBatchNdSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const SpaceToBatchNdDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::SpaceToDepth:
            return IsSpaceToDepthSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const SpaceToDepthDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Splitter:
        {
            std::vector<TensorInfo> outputInfos;
            for (uint32_t i = 1; i < infos.size(); i++)
            {
                outputInfos.push_back(infos[i]);
            }
            return IsSplitterSupported(infos[0],
                                       {outputInfos.begin(), outputInfos.end()},
                                       *(PolymorphicDowncast<const ViewsDescriptor*>(&descriptor)),
                                       reasonIfUnsupported);
        }
        case LayerType::Stack:
        {
            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < infos.size() - 1; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            return IsStackSupported(inputInfos,
                                    infos[infos.size() - 1],
                                    *(PolymorphicDowncast<const StackDescriptor*>(&descriptor)),
                                    reasonIfUnsupported);
        }
        case LayerType::StridedSlice:
            return IsStridedSliceSupported(infos[0],
                                           infos[1],
                                           *(PolymorphicDowncast<const StridedSliceDescriptor*>(&descriptor)),
                                           reasonIfUnsupported);
        case LayerType::Subtraction:
            return IsSubtractionSupported(infos[0], infos[1], infos[2], reasonIfUnsupported);
        case LayerType::Transpose:
            return IsTransposeSupported(infos[0],
                                        infos[1],
                                        *(PolymorphicDowncast<const TransposeDescriptor*>(&descriptor)),
                                        reasonIfUnsupported);
        case LayerType::TransposeConvolution2d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of TransposeConvolution2d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const TransposeConvolution2dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         EmptyOptional(),
                                                         reasonIfUnsupported);
            }
            else
            {
                return IsTransposeConvolution2dSupported(infos[0],
                                                         infos[1],
                                                         desc,
                                                         infos[2],
                                                         infos[3],
                                                         reasonIfUnsupported);
            }
        }
        case LayerType::Cast:
            return IsCastSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::ChannelShuffle:
            return IsChannelShuffleSupported(infos[0],
                                             infos[1],
                                             *(PolymorphicDowncast<const ChannelShuffleDescriptor*>(&descriptor)),
                                             reasonIfUnsupported);
        case LayerType::Convolution3d:
        {
            if (infos.size() != 4)
            {
                throw InvalidArgumentException("Invalid number of Convolution3d TensorInfos. "
                                               "TensorInfos should be of format: {input, output, weights, biases}.");
            }

            auto desc = *(PolymorphicDowncast<const Convolution3dDescriptor*>(&descriptor));
            if (infos[3] == TensorInfo())
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                EmptyOptional(),
                                                reasonIfUnsupported);
            }
            else
            {
                return IsConvolution3dSupported(infos[0],
                                                infos[1],
                                                desc,
                                                infos[2],
                                                infos[3],
                                                reasonIfUnsupported);
            }
        }
        case LayerType::MemCopy:
            return LayerSupportBase::IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return LayerSupportBase::IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::Map:
            return true;
        case LayerType::Unmap:
            return true;
        case LayerType::Merge:
            return LayerSupportBase::IsMergeSupported(infos[0],
                                                      infos[1],
                                                      infos[2],
                                                      reasonIfUnsupported);
        case LayerType::Rank:
            return true;
        case LayerType::Shape:
            return LayerSupportBase::IsShapeSupported(infos[0],
                                                      infos[1],
                                                      reasonIfUnsupported);
        case LayerType::ConvertBf16ToFp32:
            return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        case LayerType::ConvertFp32ToBf16:
            return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0],
                                                                  infos[1],
                                                                  reasonIfUnsupported);
        default:
            // Layers not supported in CL by default:
            // debug, detectionpostprocess, fakequantization, precompiled,
            // standin, switch, unidirectionalsequencelstm, pooling3d
            return false;
    }
}

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsArgMinMaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ArgMinMaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClArgMinMaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsCastSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClCastValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsChannelShuffleSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ChannelShuffleDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClChannelShuffleValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsComparisonSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const ComparisonDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClComparisonWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const OriginsDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
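    // e.g. for a 4-D NCHW tensor, concatenating along channels (axis 1) gives
    // concatInnerAxis == 2, while concatenating along the batch gives 3.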
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
                                   reasonIfUnsupported,
                                   output);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
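    // The fast-math flag comes from the backend's model options; when enabled it
    // lets the Compute Library pick faster convolution algorithms (such as
    // Winograd) that may trade some numerical precision for speed.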
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool ClLayerSupport::IsConvolution3dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution3dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    bool isFastMathEnabled = false;
#if defined(ARMCOMPUTECL_ENABLED)
    if (m_ModelContextPtr)
    {
        if (m_ModelContextPtr.get() != nullptr)
        {
            auto modelOptions = dynamic_cast<ClBackendModelContext*>(m_ModelContextPtr.get());
            if (modelOptions)
            {
                isFastMathEnabled = modelOptions->IsFastMathEnabled();
            }
        }
    }
#endif

    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution3dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   isFastMathEnabled,
                                   nullptr);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const DepthToSpaceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthToSpaceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases,
                                   nullptr);
}

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
{
    switch (descriptor.m_Operation)
    {
        case UnaryOperation::Abs:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Exp:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClExpWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Log:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::LogicalNot:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Neg:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Rsqrt:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        case UnaryOperation::Sin:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClSinWorkloadValidate,
                                           reasonIfUnsupported,
                                           input,
                                           output);
        default:
            return false;
    }
}

bool ClLayerSupport::IsFillSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const FillDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    armnn::IgnoreUnused(input);
    armnn::IgnoreUnused(output);
    armnn::IgnoreUnused(descriptor);

    return IsClBackendSupported(reasonIfUnsupported);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFloorWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor,
                                   nullptr);
}

bool ClLayerSupport::IsGatherSupported(const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       const TensorInfo& output,
                                       const GatherDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGatherWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, input);
}

bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const InstanceNormalizationDescriptor& descriptor,
                                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
                                              const TensorInfo& input1,
                                              const TensorInfo& output,
                                              const LogicalBinaryDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(output);

    switch (descriptor.m_Operation)
    {
        case LogicalBinaryOperation::LogicalAnd:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        case LogicalBinaryOperation::LogicalOr:
            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
                                           reasonIfUnsupported,
                                           input0,
                                           input1,
                                           output);
        default:
            return false;
    }
}

bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const LogSoftmaxDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogSoftmaxWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported, output);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo& input,
                                      const armnn::TensorInfo& alpha,
                                      const armnn::TensorInfo& output,
                                      armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQLstmSupported(const TensorInfo& input,
                                      const TensorInfo& previousOutputIn,
                                      const TensorInfo& previousCellStateIn,
                                      const TensorInfo& outputStateOut,
                                      const TensorInfo& cellStateOut,
                                      const TensorInfo& output,
                                      const QLstmDescriptor& descriptor,
                                      const LstmInputParamsInfo& paramsInfo,
                                      Optional<std::string&> reasonIfUnsupported) const
{
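    // Only this combination of quantized types (QAsymmS8 activations/outputs,
    // QSymmS16 cell states) is forwarded to the workload validation; any other
    // combination is rejected outright.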
    if (input.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousOutputIn.GetDataType() == armnn::DataType::QAsymmS8 &&
        previousCellStateIn.GetDataType() == armnn::DataType::QSymmS16 &&
        outputStateOut.GetDataType() == armnn::DataType::QAsymmS8 &&
        cellStateOut.GetDataType() == armnn::DataType::QSymmS16 &&
        output.GetDataType() == armnn::DataType::QAsymmS8)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClQLstmWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       previousCellStateIn,
                                       previousOutputIn,
                                       cellStateOut,
                                       outputStateOut,
                                       output,
                                       descriptor,
                                       paramsInfo);
    }
    else
    {
        return false;
    }
}

bool ClLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
                                              const TensorInfo& previousCellStateIn,
                                              const TensorInfo& previousOutputIn,
                                              const TensorInfo& cellStateOut,
                                              const TensorInfo& output,
                                              const QuantizedLstmInputParamsInfo& paramsInfo,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizedLstmWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   previousCellStateIn,
                                   previousOutputIn,
                                   cellStateOut,
                                   output,
                                   paramsInfo);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReduceSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ReduceDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReduceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      const SliceDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSliceWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
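    // Fall-through path: splits on any other axis rely on sub-tensor views,
    // which only work when every output shares the input's type and
    // quantization space.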
    IgnoreUnused(descriptor);
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output,
                                   nullptr);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsTransposeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const TransposeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

} // namespace armnn