//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGreaterWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
#include "workloads/ClResizeWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSpaceToDepthWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
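// Usage sketch (hypothetical call, not taken from this file): IsMatchingStride<1, 2, 3>(stride)
// resolves through the overloads above, peeling off one ValidStride per recursive instantiation
// and returning true when stride matches any of the listed values.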

bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif
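// Illustrative expansion of the macro above: inside IsActivationSupported the
// FORWARD_WORKLOAD_VALIDATE_FUNC line becomes
//     return IsWorkloadSupported(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// when ARMCOMPUTECL_ENABLED is defined, and
//     return IsClBackendSupported(reasonIfUnsupported);
// otherwise.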

template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}
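// Dispatch note: IsSupportedForDataTypeGeneric calls floatFuncPtr for both Float16 and Float32,
// uint8FuncPtr for the quantised 8-bit type, and the two FalseFunc entries for the Signed32 and
// Boolean cases, so those data types are reported as unsupported by this helper.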

} // anonymous namespace

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
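    // Worked example (hypothetical shapes): for 4D NCHW inputs, concatenation along channels
    // (axis 1) gives concatInnerAxis == 2 and is validated by ACL below, while concatenation
    // along the batch (axis 0) gives concatInnerAxis == 3 and takes the sub-tensor path.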
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      input.GetDataType(),
                                      &FalseFuncF16<>,
                                      &TrueFunc<>,
                                      &FalseFuncU8<>,
                                      &FalseFuncI32<>,
                                      &FalseFuncU8<>);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const LstmInputParamsInfo& paramsInfo,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   paramsInfo);
}

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMemCopySupported(const TensorInfo &input,
                                        const TensorInfo &output,
                                        Optional<std::string &> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      output.GetDataType(),
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &TrueFunc<>,
                                      &FalseFuncI32<>,
                                      &TrueFunc<>);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const armnn::TensorInfo &input,
                                      const armnn::TensorInfo &alpha,
                                      const armnn::TensorInfo &output,
                                      armnn::Optional<std::string &> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClResizeWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
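    // Legacy entry point: the descriptor is built assuming NCHW, so the target height and width
    // are read from output dimensions 2 and 3 before delegating to IsResizeSupported.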
    ResizeDescriptor descriptor;
    descriptor.m_Method = ResizeMethod::Bilinear;
    descriptor.m_DataLayout = DataLayout::NCHW;

    const TensorShape& outputShape = output.GetShape();
    descriptor.m_TargetHeight = outputShape[2];
    descriptor.m_TargetWidth = outputShape[3];

    return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
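    // The axis may be given as 1 or as a negative index that resolves to 1 (for example -1 on a
    // 2D [batch, classes] tensor); any other axis is rejected before calling the ACL validator.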
    if (!(descriptor.m_Axis == 1 ||
          (descriptor.m_Axis < 0 && static_cast<int>(input.GetNumDimensions()) + descriptor.m_Axis == 1)))
    {
        SetValueChecked(reasonIfUnsupported, "Cl Softmax: Only supports Axis equal to 1.");
        return false;
    }
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const SpaceToDepthDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToDepthWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Split along the last dimension, cannot use sub-tensors
    // as width and height of the sub-tensors do not match
    // the width and height of the parent tensor
    // in case of input with more than 2D.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
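    // Illustrative case (hypothetical shapes): splitting a [1, 4, 4, 6] input into two
    // [1, 4, 4, 3] outputs yields splitAxis == {3}, which satisfies the check below.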
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

} // namespace armnn