//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
telsoa014fcda012018-03-09 14:13:49 +00006#include "ClLayerSupport.hpp"
David Beck3e9e1152018-10-17 14:17:50 +01007#include "ClBackendId.hpp"
arovir017c22c702018-10-09 11:16:46 +01008
David Beck3cc9a622018-10-12 10:38:31 +01009#include <armnn/Descriptors.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000010#include <InternalTypes.hpp>
11#include <LayerSupportCommon.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012
David Beck111b5d92018-11-12 14:59:37 +000013#include <backendsCommon/BackendRegistry.hpp>
David Beck3e9e1152018-10-17 14:17:50 +010014
telsoa014fcda012018-03-09 14:13:49 +000015#include <boost/core/ignore_unused.hpp>
16
Matteo Martincighd95e9062019-01-31 15:35:59 +000017#if defined(ARMCOMPUTECL_ENABLED)
Narumol Prangnawarat74135832019-05-23 15:07:33 +010018#include <aclCommon/ArmComputeUtils.hpp>
David Beckac42efd2018-09-26 17:41:13 +010019#include "workloads/ClAdditionWorkload.hpp"
Nattapat Chaimanowonge06757e2018-10-11 15:39:18 +010020#include "workloads/ClActivationWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010021#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
Mike Kelly831faed2018-11-28 11:52:08 +000022#include "workloads/ClBatchToSpaceNdWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010023#include "workloads/ClConvertFp16ToFp32Workload.hpp"
24#include "workloads/ClConvertFp32ToFp16Workload.hpp"
Matthew Benthamd8067922018-10-03 17:18:04 +010025#include "workloads/ClConvolution2dWorkload.hpp"
Matthew Benthamd8777392018-10-08 09:38:55 +010026#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010027#include "workloads/ClDivisionFloatWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010028#include "workloads/ClFullyConnectedWorkload.hpp"
Nattapat Chaimanowongc6a41ff2019-01-29 09:56:02 +000029#include "workloads/ClGreaterWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010030#include "workloads/ClL2NormalizationFloatWorkload.hpp"
31#include "workloads/ClLstmFloatWorkload.hpp"
keidav01a959ee52018-12-19 10:04:58 +000032#include "workloads/ClMaximumWorkload.hpp"
Matteo Martincigh28dcab62018-10-19 16:40:03 +010033#include "workloads/ClMeanWorkload.hpp"
Jim Flynn69059412019-05-17 13:03:57 +010034#include "workloads/ClConcatWorkload.hpp"
saoste019292aa32019-01-08 13:55:59 +000035#include "workloads/ClMinimumWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010036#include "workloads/ClMultiplicationWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010037#include "workloads/ClNormalizationFloatWorkload.hpp"
arovir01085f0a42018-10-08 14:48:19 +010038#include "workloads/ClPadWorkload.hpp"
39#include "workloads/ClPermuteWorkload.hpp"
Nattapat Chaimanowongac9e0962018-10-10 17:18:35 +010040#include "workloads/ClPooling2dWorkload.hpp"
Sadik Armagan20ec2492019-05-31 09:09:44 +010041#include "workloads/ClQuantizeWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010042#include "workloads/ClSoftmaxBaseWorkload.hpp"
Sadik Armaganf4464322018-12-20 16:19:12 +000043#include "workloads/ClSpaceToBatchNdWorkload.hpp"
Narumol Prangnawarat74135832019-05-23 15:07:33 +010044#include "workloads/ClSplitterWorkload.hpp"
keidav01d74dc912018-12-10 18:16:07 +000045#include "workloads/ClStridedSliceWorkload.hpp"
David Beckac42efd2018-09-26 17:41:13 +010046#include "workloads/ClSubtractionWorkload.hpp"
telsoa014fcda012018-03-09 14:13:49 +000047#endif
48
49using namespace boost;
50
51namespace armnn
52{
arovir017c22c702018-10-09 11:16:46 +010053
telsoa014fcda012018-03-09 14:13:49 +000054namespace
55{
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +010056
telsoa014fcda012018-03-09 14:13:49 +000057template<unsigned int FilterSize>
58bool IsMatchingSize2d(const TensorInfo& weightInfo)
59{
telsoa01c577f2c2018-08-31 09:22:23 +010060 // Width & Height must match.
telsoa014fcda012018-03-09 14:13:49 +000061 return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
62}
63
// Base case: true when actualStride equals the single ValidStride.
template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return actualStride == ValidStride;
}

// Recursive case: true when actualStride equals any stride in the pack.
template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    if (IsMatchingStride<FirstStride>(actualStride))
    {
        return true;
    }
    return IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
telsoa014fcda012018-03-09 14:13:49 +000075
// Returns true when ArmNN was compiled with OpenCL support (ARMCOMPUTECL_ENABLED);
// otherwise writes an explanation into reasonIfUnsupported (if provided) and
// returns false. Every support query in this file funnels through this check.
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}
88
#if defined(ARMCOMPUTECL_ENABLED)
// With CL compiled in, evaluate the support expression directly.
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
// Without CL, every query collapses to "backend not supported" (fills the reason).
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
// Invokes an ACL validate function and converts its arm_compute::Status into a
// bool, copying the ACL error description into reasonIfUnsupported on failure.
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

// NOTE: expands to a 'return' statement, so it must be the tail of the caller.
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif
114
// Combines the backend-availability check with a per-data-type dispatch.
// The same floatFuncPtr is passed for both float slots (F16 and F32); the two
// trailing &FalseFunc<> slots always reject — presumably the Signed32 and
// Boolean data types; confirm slot order against LayerSupportCommon.hpp.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         dataType,
                                         floatFuncPtr,
                                         floatFuncPtr,
                                         uint8FuncPtr,
                                         &FalseFunc<>,
                                         &FalseFunc<>,
                                         std::forward<Params>(params)...);
}
132
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100133} // anonymous namespace
134
// Activation support: forwards to the ACL CL validate function
// (returns false with a reason when built without CL).
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
146
// Elementwise addition support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
158
// Batch normalization support: forwards all tensors (mean/var/beta/gamma) and
// the descriptor to the ACL CL validate function.
bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}
178
// BatchToSpaceNd support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
190
Jim Flynn906f9462019-05-10 13:55:21 +0100191bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
192 const TensorInfo& output,
Jim Flynne242f2d2019-05-22 14:24:13 +0100193 const ConcatDescriptor& descriptor,
Jim Flynn906f9462019-05-10 13:55:21 +0100194 Optional<std::string&> reasonIfUnsupported) const
195{
Jim Flynne242f2d2019-05-22 14:24:13 +0100196 if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
197 {
198 SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
199 return false;
200 }
201
202 unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
203 if(concatInnerAxis < 3) // Width, height, or channels
204 {
205 FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
206 reasonIfUnsupported,
207 inputs,
208 output,
209 descriptor);
210 }
211 else if (concatInnerAxis == 3)
212 {
213 // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
214 // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
215 for (auto& input : inputs)
216 {
217 if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
218 {
219 SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
220 return false;
221 }
222 }
223 return true; // Sub-tensors support concat along batch
224 }
225 else // > 4 dimensions not supported.
226 {
227 SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
228 return false;
229 }
Jim Flynn906f9462019-05-10 13:55:21 +0100230}
231
// Constant layer support: float types are accepted, Uint8 is rejected
// (FalseFuncU8), everything else is rejected by the generic dispatch.
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}
240
// FP16 -> FP32 conversion support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
250
// FP32 -> FP16 conversion support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
260
// 2D convolution support: forwards tensors, descriptor, weights and optional
// biases to the ACL CL validate function.
bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
276
// Depthwise convolution support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
292
// Dilated depthwise convolution support: forwards to the same ACL validate
// function as the non-dilated variant — the descriptor carries the dilation
// parameters, so no separate validation path is needed.
bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
308
309
// Elementwise division support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
321
// Floor support: only Float32 is accepted (TrueFunc in the F32 slot); F16,
// Uint8, Int32 and Boolean are rejected via the corresponding FalseFunc slots.
bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}
336
// Fully connected support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}
352
// Elementwise greater-than comparison support: forwards to the ACL validate function.
bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
364
// Input layer support: float and Uint8 inputs are both accepted.
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}
373
// L2 normalization support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
385
// LSTM support: forwards every tensor and the descriptor, in order, to the ACL
// float LSTM validate function. Pointer parameters are the optional tensors
// (CIFG/peephole/projection variants) and may be null.
bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const TensorInfo& inputToForgetWeights,
                                     const TensorInfo& inputToCellWeights,
                                     const TensorInfo& inputToOutputWeights,
                                     const TensorInfo& recurrentToForgetWeights,
                                     const TensorInfo& recurrentToCellWeights,
                                     const TensorInfo& recurrentToOutputWeights,
                                     const TensorInfo& forgetGateBias,
                                     const TensorInfo& cellBias,
                                     const TensorInfo& outputGateBias,
                                     const TensorInfo* inputToInputWeights,
                                     const TensorInfo* recurrentToInputWeights,
                                     const TensorInfo* cellToInputWeights,
                                     const TensorInfo* inputGateBias,
                                     const TensorInfo* projectionWeights,
                                     const TensorInfo* projectionBias,
                                     const TensorInfo* cellToForgetWeights,
                                     const TensorInfo* cellToOutputWeights,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   inputToForgetWeights,
                                   inputToCellWeights,
                                   inputToOutputWeights,
                                   recurrentToForgetWeights,
                                   recurrentToCellWeights,
                                   recurrentToOutputWeights,
                                   forgetGateBias,
                                   cellBias,
                                   outputGateBias,
                                   inputToInputWeights,
                                   recurrentToInputWeights,
                                   cellToInputWeights,
                                   inputGateBias,
                                   projectionWeights,
                                   projectionBias,
                                   cellToForgetWeights,
                                   cellToOutputWeights);
}
441
// Elementwise maximum support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
453
// Mean reduction support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
465
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000466bool ClLayerSupport::IsMemCopySupported(const TensorInfo &input,
467 const TensorInfo &output,
468 Optional<std::string &> reasonIfUnsupported) const
469{
470 ignore_unused(input);
471 ignore_unused(output);
472 return true;
473}
474
// Merger is the legacy name for Concat; delegate to IsConcatSupported.
bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}
482
// Elementwise minimum support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
494
// Elementwise multiplication support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
506
// Normalization (LRN) support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
514
// Output layer support: F16, F32, Uint8 and Boolean are accepted; Int32 is
// rejected (FalseFuncI32 slot).
bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         output.GetDataType(),
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &FalseFuncI32<>,
                                         &TrueFunc<>);
}
527
// Pad support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
539
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100540bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
541 const TensorInfo& output,
542 const PermuteDescriptor& descriptor,
543 Optional<std::string&> reasonIfUnsupported) const
544{
545 ignore_unused(input);
546 ignore_unused(output);
547 FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
telsoa014fcda012018-03-09 14:13:49 +0000548}
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100549
// 2D pooling support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
557
// Quantize support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}
567
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100568bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000569 const ReshapeDescriptor& descriptor,
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100570 Optional<std::string&> reasonIfUnsupported) const
571{
572 ignore_unused(input);
Matteo Martincigh992d6dc2019-01-10 17:34:20 +0000573 ignore_unused(descriptor);
Aron Virginas-Tarbcf9f162018-10-15 11:47:37 +0100574 ignore_unused(reasonIfUnsupported);
575 return true;
576}
577
// Resize-bilinear support: float types accepted, Uint8 rejected (FalseFuncU8).
bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}
588
// Softmax support: the descriptor is not consulted; only the tensor infos are
// validated by ACL.
bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
}
597
// SpaceToBatchNd support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
609
// Legacy splitter overload (no output infos): float and Uint8 accepted based on
// the input data type only; the views descriptor is not validated here.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}
620
// Splitter support with explicit output infos. A split along the last dimension
// of a >2D tensor cannot use sub-tensors (sub-tensor width/height would not
// match the parent), so that case is validated by the ACL workload; every other
// case falls through to the sub-tensor type-space check.
bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1 )
    {
        // Macro expands to a return — this branch ends the function.
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    // Sub-tensor path: every output must share the input's type and
    // quantization space.
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}
652
// StridedSlice support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}
664
// Elementwise subtraction support: forwards to the ACL CL validate function.
bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}
676
677} // namespace armnn