//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGreaterWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSpaceToDepthWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClStackWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

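// Helpers used by the per-layer support checks below; they are kept in an
// anonymous namespace because they are implementation details of this file.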
template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

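// Variadic overload: true if actualStride matches any of the listed strides,
// e.g. IsMatchingStride<1, 2, 3>(stride) accepts strides of 1, 2 or 3.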
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}

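// Reports whether this build of ArmNN includes the CL backend at all. When
// ARMCOMPUTECL_ENABLED is not defined, the optional reason is populated and
// false is returned.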
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

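// FORWARD_CL_LAYER_SUPPORT_FUNC evaluates its expression only when the CL
// backend is compiled in; otherwise it degrades to the IsClBackendSupported
// check above.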
#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

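// IsWorkloadSupported converts the arm_compute::Status returned by a workload's
// static Validate function into a bool, copying the ACL error description into
// reasonIfUnsupported on failure. FORWARD_WORKLOAD_VALIDATE_FUNC(func, reason, ...)
// expands to "return IsWorkloadSupported(func, reason, ...);", so most of the
// ClLayerSupport::Is*Supported methods below are thin wrappers around the
// corresponding ACL validation call.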
#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif

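// Dispatches on the tensor data type: Float16 and Float32 both use floatFuncPtr,
// the 8-bit quantised type uses uint8FuncPtr, and the Signed32/Boolean slots are
// always rejected by this helper.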
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

} // anonymous namespace

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

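    // Convert the concat axis so that it counts from the innermost dimension
    // (for a 4D NCHW-ordered shape: 0 = width, 1 = height, 2 = channels, 3 = batch).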
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         output.GetDataType(),
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &FalseFuncI32<>,
                                         &TrueFunc<>);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo& input,
                                      const armnn::TensorInfo& alpha,
                                      const armnn::TensorInfo& output,
                                      armnn::Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
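    // Implemented in terms of IsResizeSupported: build a bilinear, NCHW
    // ResizeDescriptor whose target height and width are taken from
    // outputShape[2] and outputShape[3] respectively.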
    ResizeDescriptor descriptor;
    descriptor.m_Method     = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth  = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
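    // Only axis == 1 is accepted, either given directly or as a negative index
    // that resolves to 1 once the tensor's dimension count is added.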
    if (!(descriptor.m_Axis == 1 ||
          (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
    {
        SetValueChecked(reasonIfUnsupported, "Cl Softmax: Only supports Axis equal to 1.");
        return false;
    }
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
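    // Otherwise fall back to the sub-tensor path, which requires every output
    // to share the input's type and quantization space.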
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                      const TensorInfo& output,
                                      const StackDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
                                   reasonIfUnsupported,
                                   inputs,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

} // namespace armnn