//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGreaterWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}

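// Reports whether this build of the armnn library includes CL backend support,
// setting reasonIfUnsupported when it does not.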
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

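// When CL support is compiled in, forward to a workload validation function returning arm_compute::Status and
// propagate its error description on failure; otherwise report that the CL backend is unavailable.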
#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif

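// Checks that the CL backend is available, then dispatches on data type: Float16 and Float32 share floatFuncPtr,
// the 8-bit quantised type uses uint8FuncPtr, and Signed32 and Boolean are always rejected.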
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

} // anonymous namespace

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

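    // Map the concat axis to its position counted from the innermost dimension: 0-2 (width, height, channels)
    // are handled by the CL concat workload, while 3 (batch) relies on the sub-tensor path below.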
    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
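    // Floor is only supported for Float32 on the CL backend.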
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &FalseFuncF16<>,
                                      &TrueFunc<>,
                                      &FalseFuncU8<>,
                                      &FalseFuncI32<>,
                                      &FalseFuncU8<>);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const TensorInfo& inputToForgetWeights,
                                     const TensorInfo& inputToCellWeights,
                                     const TensorInfo& inputToOutputWeights,
                                     const TensorInfo& recurrentToForgetWeights,
                                     const TensorInfo& recurrentToCellWeights,
                                     const TensorInfo& recurrentToOutputWeights,
                                     const TensorInfo& forgetGateBias,
                                     const TensorInfo& cellBias,
                                     const TensorInfo& outputGateBias,
                                     const TensorInfo* inputToInputWeights,
                                     const TensorInfo* recurrentToInputWeights,
                                     const TensorInfo* cellToInputWeights,
                                     const TensorInfo* inputGateBias,
                                     const TensorInfo* projectionWeights,
                                     const TensorInfo* projectionBias,
                                     const TensorInfo* cellToForgetWeights,
                                     const TensorInfo* cellToOutputWeights,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   inputToForgetWeights,
                                   inputToCellWeights,
                                   inputToOutputWeights,
                                   recurrentToForgetWeights,
                                   recurrentToCellWeights,
                                   recurrentToOutputWeights,
                                   forgetGateBias,
                                   cellBias,
                                   outputGateBias,
                                   inputToInputWeights,
                                   recurrentToInputWeights,
                                   cellToInputWeights,
                                   inputGateBias,
                                   projectionWeights,
                                   projectionBias,
                                   cellToForgetWeights,
                                   cellToOutputWeights);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &FalseFuncI32<>,
                                      &TrueFunc<>);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // When splitting along the last dimension, sub-tensors cannot be used: for inputs with more than
    // two dimensions the width and height of the sub-tensors would not match those of the parent tensor.
    // In that case validate the CL splitter workload instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
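    // Otherwise rely on sub-tensors, which require every output to match the input's type and quantization space.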
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

} // namespace armnn