//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"

#include <armnn/Descriptors.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/core/ignore_unused.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include "workloads/ClAdditionWorkload.hpp"
#include "workloads/ClActivationWorkload.hpp"
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGreaterWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
#include "workloads/ClMeanWorkload.hpp"
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClPreluWorkload.hpp"
#include "workloads/ClQuantizeWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
#endif

using namespace boost;

namespace armnn
{

namespace
{

template<unsigned int FilterSize>
bool IsMatchingSize2d(const TensorInfo& weightInfo)
{
    // Width & Height must match.
    return (weightInfo.GetShape()[3] == FilterSize) && (weightInfo.GetShape()[2] == FilterSize);
}

template<uint32_t ValidStride>
bool IsMatchingStride(uint32_t actualStride)
{
    return ValidStride == actualStride;
}

template<uint32_t FirstStride, uint32_t SecondStride, uint32_t... ValidStrides>
bool IsMatchingStride(uint32_t actualStride)
{
    return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
}
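// Illustrative note (hypothetical call, not taken from this file): IsMatchingStride<1, 2, 3>(s)
// expands to IsMatchingStride<1>(s) || IsMatchingStride<2, 3>(s), so the actual stride is
// compared against each valid stride in the pack in turn.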

bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
{
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "The armnn library has been built without CL support";
    }
    return false;
#endif
}

#if defined(ARMCOMPUTECL_ENABLED)
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) (expr)
#else
#define FORWARD_CL_LAYER_SUPPORT_FUNC(expr) IsClBackendSupported(reasonIfUnsupported)
#endif
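// Note: FORWARD_CL_LAYER_SUPPORT_FUNC passes its expression through when CL support is compiled
// in and otherwise reports the backend as unsupported; it is not referenced elsewhere in this
// file, which uses FORWARD_WORKLOAD_VALIDATE_FUNC below instead.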

#if defined(ARMCOMPUTECL_ENABLED)
template<class FuncType, class... Args>
inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIfUnsupported, Args&&... args)
{
    arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
    const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = aclStatus.error_description();
    }
    return supported;
}

#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
#else
#define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
    return IsClBackendSupported(reasonIfUnsupported);
#endif
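// Typical expansion (see the Is<Layer>Supported functions below), e.g.
//     FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
// becomes 'return IsWorkloadSupported(ClActivationWorkloadValidate, ...);' when CL is enabled,
// translating the arm_compute::Status into a bool plus an optional reason string, and falls back
// to 'return IsClBackendSupported(reasonIfUnsupported);' otherwise.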

// Helper that gates the generic data-type check on CL support. IsSupportedForDataTypeGeneric
// dispatches on the data type in the order Float16, Float32, Uint8, Signed32, Boolean, so the
// float function is reused for both float types and the remaining types are rejected here.
template<typename FloatFunc, typename Uint8Func, typename ... Params>
bool IsSupportedForDataTypeCl(Optional<std::string&> reasonIfUnsupported,
                              DataType dataType,
                              FloatFunc floatFuncPtr,
                              Uint8Func uint8FuncPtr,
                              Params&&... params)
{
    return IsClBackendSupported(reasonIfUnsupported) &&
        IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                      dataType,
                                      floatFuncPtr,
                                      floatFuncPtr,
                                      uint8FuncPtr,
                                      &FalseFunc<>,
                                      &FalseFunc<>,
                                      std::forward<Params>(params)...);
}

} // anonymous namespace

bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ActivationDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClActivationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsAdditionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClAdditionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const TensorInfo& mean,
                                                   const TensorInfo& var,
                                                   const TensorInfo& beta,
                                                   const TensorInfo& gamma,
                                                   const BatchNormalizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchNormalizationValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   mean,
                                   var,
                                   beta,
                                   gamma,
                                   descriptor);
}

bool ClLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const BatchToSpaceNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClBatchToSpaceNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const ConcatDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Concat axis > Number of dimensions.");
        return false;
    }

    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
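    // (concatInnerAxis counts dimensions from the innermost one, so for a 4D tensor an ArmNN
    //  concat axis of 3 maps to inner axis 0 and an axis of 0, the batch dimension, maps to 3.)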
    if (concatInnerAxis < 3) // Width, height, or channels
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClConcatWorkloadValidate,
                                       reasonIfUnsupported,
                                       inputs,
                                       output,
                                       descriptor);
    }
    else if (concatInnerAxis == 3)
    {
        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
        // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work.
        for (auto& input : inputs)
        {
            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space
            {
                SetValueChecked(reasonIfUnsupported, "Cl Concat: Types and quantization parameters must match.");
                return false;
            }
        }
        return true; // Sub-tensors support concat along batch
    }
    else // > 4 dimensions not supported.
    {
        SetValueChecked(reasonIfUnsupported, "Cl Concat: Maximum of 4 dimensions supported.");
        return false;
    }
}

bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    output.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp16ToFp32WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvertFp32ToFp16WorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Convolution2dDescriptor& descriptor,
                                              const TensorInfo& weights,
                                              const Optional<TensorInfo>& biases,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDequantizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDequantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const DepthwiseConvolution2dDescriptor& descriptor,
                                                     const TensorInfo& weights,
                                                     const Optional<TensorInfo>& biases,
                                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

bool ClLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
                                                            const TensorInfo& output,
                                                            const DepthwiseConvolution2dDescriptor& descriptor,
                                                            const TensorInfo& weights,
                                                            const Optional<TensorInfo>& biases,
                                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDepthwiseConvolutionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}
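// Note: dilated depthwise convolution is validated with the same ClDepthwiseConvolutionWorkloadValidate
// call as the non-dilated case; the dilation settings are presumably carried in the shared
// DepthwiseConvolution2dDescriptor (an assumption based on the identical forwarding above).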

bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClDivisionWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         input.GetDataType(),
                                         &FalseFuncF16<>,
                                         &TrueFunc<>,
                                         &FalseFuncU8<>,
                                         &FalseFuncI32<>,
                                         &FalseFuncU8<>);
}

bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const TensorInfo& weights,
                                               const TensorInfo& biases,
                                               const FullyConnectedDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   weights,
                                   biases,
                                   descriptor);
}

bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClGreaterWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const L2NormalizationDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClL2NormalizationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsLstmSupported(const TensorInfo& input,
                                     const TensorInfo& outputStateIn,
                                     const TensorInfo& cellStateIn,
                                     const TensorInfo& scratchBuffer,
                                     const TensorInfo& outputStateOut,
                                     const TensorInfo& cellStateOut,
                                     const TensorInfo& output,
                                     const LstmDescriptor& descriptor,
                                     const TensorInfo& inputToForgetWeights,
                                     const TensorInfo& inputToCellWeights,
                                     const TensorInfo& inputToOutputWeights,
                                     const TensorInfo& recurrentToForgetWeights,
                                     const TensorInfo& recurrentToCellWeights,
                                     const TensorInfo& recurrentToOutputWeights,
                                     const TensorInfo& forgetGateBias,
                                     const TensorInfo& cellBias,
                                     const TensorInfo& outputGateBias,
                                     const TensorInfo* inputToInputWeights,
                                     const TensorInfo* recurrentToInputWeights,
                                     const TensorInfo* cellToInputWeights,
                                     const TensorInfo* inputGateBias,
                                     const TensorInfo* projectionWeights,
                                     const TensorInfo* projectionBias,
                                     const TensorInfo* cellToForgetWeights,
                                     const TensorInfo* cellToOutputWeights,
                                     Optional<std::string&> reasonIfUnsupported,
                                     const TensorInfo* inputLayerNormWeights,
                                     const TensorInfo* forgetLayerNormWeights,
                                     const TensorInfo* cellLayerNormWeights,
                                     const TensorInfo* outputLayerNormWeights) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClLstmFloatWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   outputStateIn,
                                   cellStateIn,
                                   scratchBuffer,
                                   outputStateOut,
                                   cellStateOut,
                                   output,
                                   descriptor,
                                   inputToForgetWeights,
                                   inputToCellWeights,
                                   inputToOutputWeights,
                                   recurrentToForgetWeights,
                                   recurrentToCellWeights,
                                   recurrentToOutputWeights,
                                   forgetGateBias,
                                   cellBias,
                                   outputGateBias,
                                   inputToInputWeights,
                                   recurrentToInputWeights,
                                   cellToInputWeights,
                                   inputGateBias,
                                   projectionWeights,
                                   projectionBias,
                                   cellToForgetWeights,
                                   cellToOutputWeights);
}
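// Note: the optional layer-normalization weight parameters accepted above are not forwarded to
// ClLstmFloatWorkloadValidate in this implementation.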

bool ClLayerSupport::IsMaximumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMaximumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMeanSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const MeanDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMeanValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsMemCopySupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    return true;
}

bool ClLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                       const TensorInfo& output,
                                       const MergerDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
}

bool ClLayerSupport::IsMinimumSupported(const TensorInfo& input0,
                                        const TensorInfo& input1,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMinimumWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClMultiplicationWorkloadValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const NormalizationDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClNormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    return IsClBackendSupported(reasonIfUnsupported) &&
           IsSupportedForDataTypeGeneric(reasonIfUnsupported,
                                         output.GetDataType(),
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &TrueFunc<>,
                                         &FalseFuncI32<>,
                                         &TrueFunc<>);
}

bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
                                    const TensorInfo& output,
                                    const PadDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsPermuteSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PermuteDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(output);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPermuteWorkloadValidate, reasonIfUnsupported, descriptor);
}

bool ClLayerSupport::IsPooling2dSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const Pooling2dDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPooling2dWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}

bool ClLayerSupport::IsPreluSupported(const TensorInfo& input,
                                      const TensorInfo& alpha,
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClPreluWorkloadValidate, reasonIfUnsupported, input, alpha, output);
}

bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClQuantizeWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output);
}

bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                        const ReshapeDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(input);
    ignore_unused(descriptor);
    ignore_unused(reasonIfUnsupported);
    return true;
}

bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       const ResizeDescriptor& descriptor,
                                       Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);

    if (descriptor.m_Method == ResizeMethod::Bilinear)
    {
        return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                        input.GetDataType(),
                                        &TrueFunc<>,
                                        &FalseFuncU8<>);
    }

    return false;
}

bool ClLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(output);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &FalseFuncU8<>);
}

bool ClLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const SoftmaxDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSoftmaxWorkloadValidate, reasonIfUnsupported, input, output);
}

bool ClLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const SpaceToBatchNdDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSpaceToBatchNdWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
    ignore_unused(descriptor);
    return IsSupportedForDataTypeCl(reasonIfUnsupported,
                                    input.GetDataType(),
                                    &TrueFunc<>,
                                    &TrueFunc<>);
}

bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                         const ViewsDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
{
#if defined(ARMCOMPUTECL_ENABLED)
    // Splitting along the last dimension of an input with more than two dimensions cannot use
    // sub-tensors, because the width and height of the sub-tensors would not match those of the
    // parent tensor; in that case validate against the CL splitter workload instead.
    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
    {
        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
                                       reasonIfUnsupported,
                                       input,
                                       outputs,
                                       *splitAxis.begin());
    }
#endif
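    // Otherwise fall back to splitting via sub-tensors, which requires every output to share the
    // input's type and quantization space.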
    for (auto output : outputs)
    {
        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
        {
            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
            return false;
        }
    }
    return true;
}

bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             const StridedSliceDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor);
}

bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
                                   reasonIfUnsupported,
                                   input0,
                                   input1,
                                   output);
}

bool ClLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TransposeConvolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    FORWARD_WORKLOAD_VALIDATE_FUNC(ClTransposeConvolution2dWorkloadValidate,
                                   reasonIfUnsupported,
                                   input,
                                   output,
                                   descriptor,
                                   weights,
                                   biases);
}

} // namespace armnn
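
// Illustrative usage sketch (not part of this file; the tensor shape, data type, and reason
// handling below are assumptions for the example rather than values taken from this source):
//
//     armnn::ClLayerSupport layerSupport;
//     std::string reason;
//     armnn::TensorInfo info(armnn::TensorShape({1, 16, 8, 8}), armnn::DataType::Float32);
//     armnn::ActivationDescriptor activation;
//     if (!layerSupport.IsActivationSupported(info, info, activation,
//                                             armnn::Optional<std::string&>(reason)))
//     {
//         // Fall back to another backend and report 'reason'.
//     }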